move % out of translatable strings...
Martin Geisler
r6913:580d5e6b default
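
This commit applies one pattern throughout: keep the % interpolation outside the _() call. gettext uses the exact literal passed to _() as the lookup key in the translation catalog, so interpolating first asks the catalog for a string that can never have an entry, and the message silently falls back to English. A minimal sketch of the before/after, using the standard-library gettext (standing in for mercurial.i18n) and a hypothetical path value:

    from gettext import gettext as _

    path = '/tmp/repo'  # hypothetical example value

    # Before: '%' runs first, so gettext is asked to translate
    # "/tmp/repo: unknown repository type" -- a string that cannot
    # appear in any message catalog.
    bad = _('%s: unknown repository type' % path)

    # After: the literal format string is the catalog key; the
    # translated template is interpolated afterwards.
    good = _('%s: unknown repository type') % path

The first hunk below also drops _() around "%s\n" entirely, since a bare placeholder gives translators nothing to translate.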
@@ -1,337 +1,337 @@
1 1 # convcmd - convert extension commands definition
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from common import NoRepo, MissingTool, SKIPREV, mapfile
9 9 from cvs import convert_cvs
10 10 from darcs import darcs_source
11 11 from git import convert_git
12 12 from hg import mercurial_source, mercurial_sink
13 13 from subversion import debugsvnlog, svn_source, svn_sink
14 14 from monotone import monotone_source
15 15 from gnuarch import gnuarch_source
16 16 import filemap
17 17
18 18 import os, shutil
19 19 from mercurial import hg, util
20 20 from mercurial.i18n import _
21 21
22 22 orig_encoding = 'ascii'
23 23
24 24 def recode(s):
25 25 if isinstance(s, unicode):
26 26 return s.encode(orig_encoding, 'replace')
27 27 else:
28 28 return s.decode('utf-8').encode(orig_encoding, 'replace')
29 29
30 30 source_converters = [
31 31 ('cvs', convert_cvs),
32 32 ('git', convert_git),
33 33 ('svn', svn_source),
34 34 ('hg', mercurial_source),
35 35 ('darcs', darcs_source),
36 36 ('mtn', monotone_source),
37 37 ('gnuarch', gnuarch_source),
38 38 ]
39 39
40 40 sink_converters = [
41 41 ('hg', mercurial_sink),
42 42 ('svn', svn_sink),
43 43 ]
44 44
45 45 def convertsource(ui, path, type, rev):
46 46 exceptions = []
47 47 for name, source in source_converters:
48 48 try:
49 49 if not type or name == type:
50 50 return source(ui, path, rev)
51 51 except (NoRepo, MissingTool), inst:
52 52 exceptions.append(inst)
53 53 if not ui.quiet:
54 54 for inst in exceptions:
55 ui.write(_("%s\n") % inst)
56 raise util.Abort('%s: unknown repository type' % path)
55 ui.write("%s\n" % inst)
56 raise util.Abort(_('%s: unknown repository type') % path)
57 57
58 58 def convertsink(ui, path, type):
59 59 for name, sink in sink_converters:
60 60 try:
61 61 if not type or name == type:
62 62 return sink(ui, path)
63 63 except NoRepo, inst:
64 64 ui.note(_("convert: %s\n") % inst)
65 raise util.Abort('%s: unknown repository type' % path)
65 raise util.Abort(_('%s: unknown repository type') % path)
66 66
67 67 class converter(object):
68 68 def __init__(self, ui, source, dest, revmapfile, opts):
69 69
70 70 self.source = source
71 71 self.dest = dest
72 72 self.ui = ui
73 73 self.opts = opts
74 74 self.commitcache = {}
75 75 self.authors = {}
76 76 self.authorfile = None
77 77
78 78 self.map = mapfile(ui, revmapfile)
79 79
80 80 # Read the destination author map first, if any
81 81 authorfile = self.dest.authorfile()
82 82 if authorfile and os.path.exists(authorfile):
83 83 self.readauthormap(authorfile)
84 84 # Extend/Override with new author map if necessary
85 85 if opts.get('authors'):
86 86 self.readauthormap(opts.get('authors'))
87 87 self.authorfile = self.dest.authorfile()
88 88
89 89 self.splicemap = mapfile(ui, opts.get('splicemap'))
90 90
91 91 def walktree(self, heads):
92 92 '''Return a mapping that identifies the uncommitted parents of every
93 93 uncommitted changeset.'''
94 94 visit = heads
95 95 known = {}
96 96 parents = {}
97 97 while visit:
98 98 n = visit.pop(0)
99 99 if n in known or n in self.map: continue
100 100 known[n] = 1
101 101 commit = self.cachecommit(n)
102 102 parents[n] = []
103 103 for p in commit.parents:
104 104 parents[n].append(p)
105 105 visit.append(p)
106 106
107 107 return parents
108 108
109 109 def toposort(self, parents):
110 110 '''Return an ordering such that every uncommitted changeset is
111 111 preceded by all its uncommitted ancestors.'''
112 112 visit = parents.keys()
113 113 seen = {}
114 114 children = {}
115 115 actives = []
116 116
117 117 while visit:
118 118 n = visit.pop(0)
119 119 if n in seen: continue
120 120 seen[n] = 1
121 121 # Ensure that nodes without parents are present in the 'children'
122 122 # mapping.
123 123 children.setdefault(n, [])
124 124 hasparent = False
125 125 for p in parents[n]:
126 126 if not p in self.map:
127 127 visit.append(p)
128 128 hasparent = True
129 129 children.setdefault(p, []).append(n)
130 130 if not hasparent:
131 131 actives.append(n)
132 132
133 133 del seen
134 134 del visit
135 135
136 136 if self.opts.get('datesort'):
137 137 dates = {}
138 138 def getdate(n):
139 139 if n not in dates:
140 140 dates[n] = util.parsedate(self.commitcache[n].date)
141 141 return dates[n]
142 142
143 143 def picknext(nodes):
144 144 return min([(getdate(n), n) for n in nodes])[1]
145 145 else:
146 146 prev = [None]
147 147 def picknext(nodes):
148 148 # Return the first eligible child of the previously converted
149 149 # revision, or any of them.
150 150 next = nodes[0]
151 151 for n in nodes:
152 152 if prev[0] in parents[n]:
153 153 next = n
154 154 break
155 155 prev[0] = next
156 156 return next
157 157
158 158 s = []
159 159 pendings = {}
160 160 while actives:
161 161 n = picknext(actives)
162 162 actives.remove(n)
163 163 s.append(n)
164 164
165 165 # Update dependents list
166 166 for c in children.get(n, []):
167 167 if c not in pendings:
168 168 pendings[c] = [p for p in parents[c] if p not in self.map]
169 169 try:
170 170 pendings[c].remove(n)
171 171 except ValueError:
172 172 raise util.Abort(_('cycle detected between %s and %s')
173 173 % (recode(c), recode(n)))
174 174 if not pendings[c]:
175 175 # Parents are converted, node is eligible
176 176 actives.insert(0, c)
177 177 pendings[c] = None
178 178
179 179 if len(s) != len(parents):
180 180 raise util.Abort(_("not all revisions were sorted"))
181 181
182 182 return s
183 183
184 184 def writeauthormap(self):
185 185 authorfile = self.authorfile
186 186 if authorfile:
187 187 self.ui.status('Writing author map file %s\n' % authorfile)
188 188 ofile = open(authorfile, 'w+')
189 189 for author in self.authors:
190 190 ofile.write("%s=%s\n" % (author, self.authors[author]))
191 191 ofile.close()
192 192
193 193 def readauthormap(self, authorfile):
194 194 afile = open(authorfile, 'r')
195 195 for line in afile:
196 196 if line.strip() == '':
197 197 continue
198 198 try:
199 199 srcauthor, dstauthor = line.split('=', 1)
200 200 srcauthor = srcauthor.strip()
201 201 dstauthor = dstauthor.strip()
202 202 if srcauthor in self.authors and dstauthor != self.authors[srcauthor]:
203 203 self.ui.status(
204 204 'Overriding mapping for author %s, was %s, will be %s\n'
205 205 % (srcauthor, self.authors[srcauthor], dstauthor))
206 206 else:
207 207 self.ui.debug('Mapping author %s to %s\n'
208 208 % (srcauthor, dstauthor))
209 209 self.authors[srcauthor] = dstauthor
210 210 except IndexError:
211 211 self.ui.warn(
212 212 'Ignoring bad line in author map file %s: %s\n'
213 213 % (authorfile, line.rstrip()))
214 214 afile.close()
215 215
216 216 def cachecommit(self, rev):
217 217 commit = self.source.getcommit(rev)
218 218 commit.author = self.authors.get(commit.author, commit.author)
219 219 self.commitcache[rev] = commit
220 220 return commit
221 221
222 222 def copy(self, rev):
223 223 commit = self.commitcache[rev]
224 224
225 225 changes = self.source.getchanges(rev)
226 226 if isinstance(changes, basestring):
227 227 if changes == SKIPREV:
228 228 dest = SKIPREV
229 229 else:
230 230 dest = self.map[changes]
231 231 self.map[rev] = dest
232 232 return
233 233 files, copies = changes
234 234 pbranches = []
235 235 if commit.parents:
236 236 for prev in commit.parents:
237 237 if prev not in self.commitcache:
238 238 self.cachecommit(prev)
239 239 pbranches.append((self.map[prev],
240 240 self.commitcache[prev].branch))
241 241 self.dest.setbranch(commit.branch, pbranches)
242 242 try:
243 243 parents = self.splicemap[rev].replace(',', ' ').split()
244 244 self.ui.status('spliced in %s as parents of %s\n' %
245 245 (parents, rev))
246 246 parents = [self.map.get(p, p) for p in parents]
247 247 except KeyError:
248 248 parents = [b[0] for b in pbranches]
249 249 newnode = self.dest.putcommit(files, copies, parents, commit, self.source)
250 250 self.source.converted(rev, newnode)
251 251 self.map[rev] = newnode
252 252
253 253 def convert(self):
254 254
255 255 try:
256 256 self.source.before()
257 257 self.dest.before()
258 258 self.source.setrevmap(self.map)
259 259 self.ui.status("scanning source...\n")
260 260 heads = self.source.getheads()
261 261 parents = self.walktree(heads)
262 262 self.ui.status("sorting...\n")
263 263 t = self.toposort(parents)
264 264 num = len(t)
265 265 c = None
266 266
267 267 self.ui.status("converting...\n")
268 268 for c in t:
269 269 num -= 1
270 270 desc = self.commitcache[c].desc
271 271 if "\n" in desc:
272 272 desc = desc.splitlines()[0]
273 273 # convert log message to local encoding without using
274 274 # tolocal(), because convert() sets util._encoding to
275 275 # 'utf-8'
276 276 self.ui.status("%d %s\n" % (num, recode(desc)))
277 self.ui.note(_("source: %s\n" % recode(c)))
277 self.ui.note(_("source: %s\n") % recode(c))
278 278 self.copy(c)
279 279
280 280 tags = self.source.gettags()
281 281 ctags = {}
282 282 for k in tags:
283 283 v = tags[k]
284 284 if self.map.get(v, SKIPREV) != SKIPREV:
285 285 ctags[k] = self.map[v]
286 286
287 287 if c and ctags:
288 288 nrev = self.dest.puttags(ctags)
289 289 # write another hash correspondence to override the previous
290 290 # one so we don't end up with extra tag heads
291 291 if nrev:
292 292 self.map[c] = nrev
293 293
294 294 self.writeauthormap()
295 295 finally:
296 296 self.cleanup()
297 297
298 298 def cleanup(self):
299 299 try:
300 300 self.dest.after()
301 301 finally:
302 302 self.source.after()
303 303 self.map.close()
304 304
305 305 def convert(ui, src, dest=None, revmapfile=None, **opts):
306 306 global orig_encoding
307 307 orig_encoding = util._encoding
308 308 util._encoding = 'UTF-8'
309 309
310 310 if not dest:
311 311 dest = hg.defaultdest(src) + "-hg"
312 312 ui.status("assuming destination %s\n" % dest)
313 313
314 314 destc = convertsink(ui, dest, opts.get('dest_type'))
315 315
316 316 try:
317 317 srcc = convertsource(ui, src, opts.get('source_type'),
318 318 opts.get('rev'))
319 319 except Exception:
320 320 for path in destc.created:
321 321 shutil.rmtree(path, True)
322 322 raise
323 323
324 324 fmap = opts.get('filemap')
325 325 if fmap:
326 326 srcc = filemap.filemap_source(ui, srcc, fmap)
327 327 destc.setfilemapmode(True)
328 328
329 329 if not revmapfile:
330 330 try:
331 331 revmapfile = destc.revmapfile()
332 332 except:
333 333 revmapfile = os.path.join(destc, "map")
334 334
335 335 c = converter(ui, srcc, destc, revmapfile, opts)
336 336 c.convert()
337 337
@@ -1,299 +1,299 @@
1 1 # GNU Arch support for the convert extension
2 2
3 3 from common import NoRepo, commandline, commit, converter_source
4 4 from mercurial.i18n import _
5 5 from mercurial import util
6 6 import os, shutil, tempfile, stat
7 7
8 8 class gnuarch_source(converter_source, commandline):
9 9
10 10 class gnuarch_rev:
11 11 def __init__(self, rev):
12 12 self.rev = rev
13 13 self.summary = ''
14 14 self.date = None
15 15 self.author = ''
16 16 self.add_files = []
17 17 self.mod_files = []
18 18 self.del_files = []
19 19 self.ren_files = {}
20 20 self.ren_dirs = {}
21 21
22 22 def __init__(self, ui, path, rev=None):
23 23 super(gnuarch_source, self).__init__(ui, path, rev=rev)
24 24
25 25 if not os.path.exists(os.path.join(path, '{arch}')):
26 raise NoRepo(_("%s does not look like a GNU Arch repo" % path))
26 raise NoRepo(_("%s does not look like a GNU Arch repo") % path)
27 27
28 28 # Could use checktool, but we want to check for baz or tla.
29 29 self.execmd = None
30 30 if util.find_exe('baz'):
31 31 self.execmd = 'baz'
32 32 else:
33 33 if util.find_exe('tla'):
34 34 self.execmd = 'tla'
35 35 else:
36 36 raise util.Abort(_('cannot find a GNU Arch tool'))
37 37
38 38 commandline.__init__(self, ui, self.execmd)
39 39
40 40 self.path = os.path.realpath(path)
41 41 self.tmppath = None
42 42
43 43 self.treeversion = None
44 44 self.lastrev = None
45 45 self.changes = {}
46 46 self.parents = {}
47 47 self.tags = {}
48 48 self.modecache = {}
49 49
50 50 def before(self):
51 51 if self.execmd == 'tla':
52 52 output = self.run0('tree-version', self.path)
53 53 else:
54 54 output = self.run0('tree-version', '-d', self.path)
55 55 self.treeversion = output.strip()
56 56
57 self.ui.status(_('analyzing tree version %s...\n' % self.treeversion))
57 self.ui.status(_('analyzing tree version %s...\n') % self.treeversion)
58 58
59 59 # Get name of temporary directory
60 60 version = self.treeversion.split('/')
61 61 self.tmppath = os.path.join(tempfile.gettempdir(),
62 62 'hg-%s' % version[1])
63 63
64 64 # Generate parents dictionary
65 65 child = []
66 66 output, status = self.runlines('revisions', self.treeversion)
67 67 self.checkexit(status, 'archive registered?')
68 68 for l in output:
69 69 rev = l.strip()
70 70 self.changes[rev] = self.gnuarch_rev(rev)
71 71
72 72 # Read author, date and summary
73 73 catlog = self.runlines0('cat-log', '-d', self.path, rev)
74 74 self._parsecatlog(catlog, rev)
75 75
76 76 self.parents[rev] = child
77 77 child = [rev]
78 78 if rev == self.rev:
79 79 break
80 80 self.parents[None] = child
81 81
82 82 def after(self):
83 self.ui.debug(_('cleaning up %s\n' % self.tmppath))
83 self.ui.debug(_('cleaning up %s\n') % self.tmppath)
84 84 shutil.rmtree(self.tmppath, ignore_errors=True)
85 85
86 86 def getheads(self):
87 87 return self.parents[None]
88 88
89 89 def getfile(self, name, rev):
90 90 if rev != self.lastrev:
91 91 raise util.Abort(_('internal calling inconsistency'))
92 92
93 93 # Raise IOError if necessary (i.e. deleted files).
94 94 if not os.path.exists(os.path.join(self.tmppath, name)):
95 95 raise IOError
96 96
97 97 data, mode = self._getfile(name, rev)
98 98 self.modecache[(name, rev)] = mode
99 99
100 100 return data
101 101
102 102 def getmode(self, name, rev):
103 103 return self.modecache[(name, rev)]
104 104
105 105 def getchanges(self, rev):
106 106 self.modecache = {}
107 107 self._update(rev)
108 108 changes = []
109 109 copies = {}
110 110
111 111 for f in self.changes[rev].add_files:
112 112 changes.append((f, rev))
113 113
114 114 for f in self.changes[rev].mod_files:
115 115 changes.append((f, rev))
116 116
117 117 for f in self.changes[rev].del_files:
118 118 changes.append((f, rev))
119 119
120 120 for src in self.changes[rev].ren_files:
121 121 to = self.changes[rev].ren_files[src]
122 122 changes.append((src, rev))
123 123 changes.append((to, rev))
124 124 copies[src] = to
125 125
126 126 for src in self.changes[rev].ren_dirs:
127 127 to = self.changes[rev].ren_dirs[src]
128 128 chgs, cps = self._rendirchanges(src, to);
129 129 changes += [(f, rev) for f in chgs]
130 130 for c in cps:
131 131 copies[c] = cps[c]
132 132
133 133 self.lastrev = rev
134 134 return util.sort(changes), copies
135 135
136 136 def getcommit(self, rev):
137 137 changes = self.changes[rev]
138 138 return commit(author = changes.author, date = changes.date,
139 139 desc = changes.summary, parents = self.parents[rev])
140 140
141 141 def gettags(self):
142 142 return self.tags
143 143
144 144 def _execute(self, cmd, *args, **kwargs):
145 145 cmdline = [self.execmd, cmd]
146 146 cmdline += args
147 147 cmdline = [util.shellquote(arg) for arg in cmdline]
148 148 cmdline += ['>', util.nulldev, '2>', util.nulldev]
149 149 cmdline = util.quotecommand(' '.join(cmdline))
150 150 self.ui.debug(cmdline, '\n')
151 151 return os.system(cmdline)
152 152
153 153 def _update(self, rev):
154 154 if rev == 'base-0':
155 155 # Initialise 'base-0' revision
156 156 self._obtainrevision(rev)
157 157 else:
158 self.ui.debug(_('applying revision %s...\n' % rev))
158 self.ui.debug(_('applying revision %s...\n') % rev)
159 159 revision = '%s--%s' % (self.treeversion, rev)
160 160 changeset, status = self.runlines('replay', '-d', self.tmppath,
161 161 revision)
162 162 if status:
163 163 # Something went wrong while merging (baz or tla
164 164 # issue?), get latest revision and try from there
165 165 shutil.rmtree(self.tmppath, ignore_errors=True)
166 166 self._obtainrevision(rev)
167 167 else:
168 168 old_rev = self.parents[rev][0]
169 self.ui.debug(_('computing changeset between %s and %s...\n' \
170 % (old_rev, rev)))
169 self.ui.debug(_('computing changeset between %s and %s...\n')
170 % (old_rev, rev))
171 171 rev_a = '%s--%s' % (self.treeversion, old_rev)
172 172 rev_b = '%s--%s' % (self.treeversion, rev)
173 173 self._parsechangeset(changeset, rev)
174 174
175 175 def _getfile(self, name, rev):
176 176 mode = os.lstat(os.path.join(self.tmppath, name)).st_mode
177 177 if stat.S_ISLNK(mode):
178 178 data = os.readlink(os.path.join(self.tmppath, name))
179 179 mode = mode and 'l' or ''
180 180 else:
181 181 data = open(os.path.join(self.tmppath, name), 'rb').read()
182 182 mode = (mode & 0111) and 'x' or ''
183 183 return data, mode
184 184
185 185 def _exclude(self, name):
186 186 exclude = [ '{arch}', '.arch-ids', '.arch-inventory' ]
187 187 for exc in exclude:
188 188 if name.find(exc) != -1:
189 189 return True
190 190 return False
191 191
192 192 def _readcontents(self, path):
193 193 files = []
194 194 contents = os.listdir(path)
195 195 while len(contents) > 0:
196 196 c = contents.pop()
197 197 p = os.path.join(path, c)
198 198 # os.walk could be used, but here we avoid internal GNU
199 199 # Arch files and directories, thus saving a lot of time.
200 200 if not self._exclude(p):
201 201 if os.path.isdir(p):
202 202 contents += [os.path.join(c, f) for f in os.listdir(p)]
203 203 else:
204 204 files.append(c)
205 205 return files
206 206
207 207 def _rendirchanges(self, src, dest):
208 208 changes = []
209 209 copies = {}
210 210 files = self._readcontents(os.path.join(self.tmppath, dest))
211 211 for f in files:
212 212 s = os.path.join(src, f)
213 213 d = os.path.join(dest, f)
214 214 changes.append(s)
215 215 changes.append(d)
216 216 copies[s] = d
217 217 return changes, copies
218 218
219 219 def _obtainrevision(self, rev):
220 self.ui.debug(_('obtaining revision %s...\n' % rev))
220 self.ui.debug(_('obtaining revision %s...\n') % rev)
221 221 revision = '%s--%s' % (self.treeversion, rev)
222 222 output = self._execute('get', revision, self.tmppath)
223 223 self.checkexit(output)
224 self.ui.debug(_('analysing revision %s...\n' % rev))
224 self.ui.debug(_('analysing revision %s...\n') % rev)
225 225 files = self._readcontents(self.tmppath)
226 226 self.changes[rev].add_files += files
227 227
228 228 def _stripbasepath(self, path):
229 229 if path.startswith('./'):
230 230 return path[2:]
231 231 return path
232 232
233 233 def _parsecatlog(self, data, rev):
234 234 summary = []
235 235 for l in data:
236 236 l = l.strip()
237 237 if summary:
238 238 summary.append(l)
239 239 elif l.startswith('Summary:'):
240 240 summary.append(l[len('Summary: '):])
241 241 elif l.startswith('Standard-date:'):
242 242 date = l[len('Standard-date: '):]
243 243 strdate = util.strdate(date, '%Y-%m-%d %H:%M:%S')
244 244 self.changes[rev].date = util.datestr(strdate)
245 245 elif l.startswith('Creator:'):
246 246 self.changes[rev].author = l[len('Creator: '):]
247 247 self.changes[rev].summary = '\n'.join(summary)
248 248
249 249 def _parsechangeset(self, data, rev):
250 250 for l in data:
251 251 l = l.strip()
252 252 # Added file (ignore added directory)
253 253 if l.startswith('A') and not l.startswith('A/'):
254 254 file = self._stripbasepath(l[1:].strip())
255 255 if not self._exclude(file):
256 256 self.changes[rev].add_files.append(file)
257 257 # Deleted file (ignore deleted directory)
258 258 elif l.startswith('D') and not l.startswith('D/'):
259 259 file = self._stripbasepath(l[1:].strip())
260 260 if not self._exclude(file):
261 261 self.changes[rev].del_files.append(file)
262 262 # Modified binary file
263 263 elif l.startswith('Mb'):
264 264 file = self._stripbasepath(l[2:].strip())
265 265 if not self._exclude(file):
266 266 self.changes[rev].mod_files.append(file)
267 267 # Modified link
268 268 elif l.startswith('M->'):
269 269 file = self._stripbasepath(l[3:].strip())
270 270 if not self._exclude(file):
271 271 self.changes[rev].mod_files.append(file)
272 272 # Modified file
273 273 elif l.startswith('M'):
274 274 file = self._stripbasepath(l[1:].strip())
275 275 if not self._exclude(file):
276 276 self.changes[rev].mod_files.append(file)
277 277 # Renamed file (or link)
278 278 elif l.startswith('=>'):
279 279 files = l[2:].strip().split(' ')
280 280 if len(files) == 1:
281 281 files = l[2:].strip().split('\t')
282 282 src = self._stripbasepath(files[0])
283 283 dst = self._stripbasepath(files[1])
284 284 if not self._exclude(src) and not self._exclude(dst):
285 285 self.changes[rev].ren_files[src] = dst
286 286 # Conversion from file to link or from link to file (modified)
287 287 elif l.startswith('ch'):
288 288 file = self._stripbasepath(l[2:].strip())
289 289 if not self._exclude(file):
290 290 self.changes[rev].mod_files.append(file)
291 291 # Renamed directory
292 292 elif l.startswith('/>'):
293 293 dirs = l[2:].strip().split(' ')
294 294 if len(dirs) == 1:
295 295 dirs = l[2:].strip().split('\t')
296 296 src = self._stripbasepath(dirs[0])
297 297 dst = self._stripbasepath(dirs[1])
298 298 if not self._exclude(src) and not self._exclude(dst):
299 299 self.changes[rev].ren_dirs[src] = dst
@@ -1,224 +1,224 @@
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of
6 6 # the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 from node import hex
10 10 import cStringIO, os, stat, tarfile, time, util, zipfile
11 11 import zlib, gzip
12 12
13 13 def tidyprefix(dest, prefix, suffixes):
14 14 '''choose prefix to use for names in archive. make sure prefix is
15 15 safe for consumers.'''
16 16
17 17 if prefix:
18 18 prefix = util.normpath(prefix)
19 19 else:
20 20 if not isinstance(dest, str):
21 21 raise ValueError('dest must be string if no prefix')
22 22 prefix = os.path.basename(dest)
23 23 lower = prefix.lower()
24 24 for sfx in suffixes:
25 25 if lower.endswith(sfx):
26 26 prefix = prefix[:-len(sfx)]
27 27 break
28 28 lpfx = os.path.normpath(util.localpath(prefix))
29 29 prefix = util.pconvert(lpfx)
30 30 if not prefix.endswith('/'):
31 31 prefix += '/'
32 32 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
33 33 raise util.Abort(_('archive prefix contains illegal components'))
34 34 return prefix
35 35
36 36 class tarit:
37 37 '''write archive to tar file or stream. can write uncompressed,
38 38 or compress with gzip or bzip2.'''
39 39
40 40 class GzipFileWithTime(gzip.GzipFile):
41 41
42 42 def __init__(self, *args, **kw):
43 43 timestamp = None
44 44 if 'timestamp' in kw:
45 45 timestamp = kw.pop('timestamp')
46 46 if timestamp == None:
47 47 self.timestamp = time.time()
48 48 else:
49 49 self.timestamp = timestamp
50 50 gzip.GzipFile.__init__(self, *args, **kw)
51 51
52 52 def _write_gzip_header(self):
53 53 self.fileobj.write('\037\213') # magic header
54 54 self.fileobj.write('\010') # compression method
55 55 # Python 2.6 deprecates self.filename
56 56 fname = getattr(self, 'name', None) or self.filename
57 57 flags = 0
58 58 if fname:
59 59 flags = gzip.FNAME
60 60 self.fileobj.write(chr(flags))
61 61 gzip.write32u(self.fileobj, long(self.timestamp))
62 62 self.fileobj.write('\002')
63 63 self.fileobj.write('\377')
64 64 if fname:
65 65 self.fileobj.write(fname + '\000')
66 66
67 67 def __init__(self, dest, prefix, mtime, kind=''):
68 68 self.prefix = tidyprefix(dest, prefix, ['.tar', '.tar.bz2', '.tar.gz',
69 69 '.tgz', '.tbz2'])
70 70 self.mtime = mtime
71 71
72 72 def taropen(name, mode, fileobj=None):
73 73 if kind == 'gz':
74 74 mode = mode[0]
75 75 if not fileobj:
76 76 fileobj = open(name, mode + 'b')
77 77 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
78 78 zlib.Z_BEST_COMPRESSION,
79 79 fileobj, timestamp=mtime)
80 80 return tarfile.TarFile.taropen(name, mode, gzfileobj)
81 81 else:
82 82 return tarfile.open(name, mode + kind, fileobj)
83 83
84 84 if isinstance(dest, str):
85 85 self.z = taropen(dest, mode='w:')
86 86 else:
87 87 # Python 2.5-2.5.1 have a regression that requires a name arg
88 88 self.z = taropen(name='', mode='w|', fileobj=dest)
89 89
90 90 def addfile(self, name, mode, islink, data):
91 91 i = tarfile.TarInfo(self.prefix + name)
92 92 i.mtime = self.mtime
93 93 i.size = len(data)
94 94 if islink:
95 95 i.type = tarfile.SYMTYPE
96 96 i.mode = 0777
97 97 i.linkname = data
98 98 data = None
99 99 else:
100 100 i.mode = mode
101 101 data = cStringIO.StringIO(data)
102 102 self.z.addfile(i, data)
103 103
104 104 def done(self):
105 105 self.z.close()
106 106
107 107 class tellable:
108 108 '''provide tell method for zipfile.ZipFile when writing to http
109 109 response file object.'''
110 110
111 111 def __init__(self, fp):
112 112 self.fp = fp
113 113 self.offset = 0
114 114
115 115 def __getattr__(self, key):
116 116 return getattr(self.fp, key)
117 117
118 118 def write(self, s):
119 119 self.fp.write(s)
120 120 self.offset += len(s)
121 121
122 122 def tell(self):
123 123 return self.offset
124 124
125 125 class zipit:
126 126 '''write archive to zip file or stream. can write uncompressed,
127 127 or compressed with deflate.'''
128 128
129 129 def __init__(self, dest, prefix, mtime, compress=True):
130 130 self.prefix = tidyprefix(dest, prefix, ('.zip',))
131 131 if not isinstance(dest, str):
132 132 try:
133 133 dest.tell()
134 134 except (AttributeError, IOError):
135 135 dest = tellable(dest)
136 136 self.z = zipfile.ZipFile(dest, 'w',
137 137 compress and zipfile.ZIP_DEFLATED or
138 138 zipfile.ZIP_STORED)
139 139 self.date_time = time.gmtime(mtime)[:6]
140 140
141 141 def addfile(self, name, mode, islink, data):
142 142 i = zipfile.ZipInfo(self.prefix + name, self.date_time)
143 143 i.compress_type = self.z.compression
144 144 # unzip will not honor unix file modes unless file creator is
145 145 # set to unix (id 3).
146 146 i.create_system = 3
147 147 ftype = stat.S_IFREG
148 148 if islink:
149 149 mode = 0777
150 150 ftype = stat.S_IFLNK
151 151 i.external_attr = (mode | ftype) << 16L
152 152 self.z.writestr(i, data)
153 153
154 154 def done(self):
155 155 self.z.close()
156 156
157 157 class fileit:
158 158 '''write archive as files in directory.'''
159 159
160 160 def __init__(self, name, prefix, mtime):
161 161 if prefix:
162 162 raise util.Abort(_('cannot give prefix when archiving to files'))
163 163 self.basedir = name
164 164 self.opener = util.opener(self.basedir)
165 165
166 166 def addfile(self, name, mode, islink, data):
167 167 if islink:
168 168 self.opener.symlink(data, name)
169 169 return
170 170 f = self.opener(name, "w", atomictemp=True)
171 171 f.write(data)
172 172 f.rename()
173 173 destfile = os.path.join(self.basedir, name)
174 174 os.chmod(destfile, mode)
175 175
176 176 def done(self):
177 177 pass
178 178
179 179 archivers = {
180 180 'files': fileit,
181 181 'tar': tarit,
182 182 'tbz2': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'bz2'),
183 183 'tgz': lambda name, prefix, mtime: tarit(name, prefix, mtime, 'gz'),
184 184 'uzip': lambda name, prefix, mtime: zipit(name, prefix, mtime, False),
185 185 'zip': zipit,
186 186 }
187 187
188 188 def archive(repo, dest, node, kind, decode=True, matchfn=None,
189 189 prefix=None, mtime=None):
190 190 '''create archive of repo as it was at node.
191 191
192 192 dest can be name of directory, name of archive file, or file
193 193 object to write archive to.
194 194
195 195 kind is type of archive to create.
196 196
197 197 decode tells whether to put files through decode filters from
198 198 hgrc.
199 199
200 200 matchfn is function to filter names of files to write to archive.
201 201
202 202 prefix is name of path to put before every archive member.'''
203 203
204 204 def write(name, mode, islink, getdata):
205 205 if matchfn and not matchfn(name): return
206 206 data = getdata()
207 207 if decode:
208 208 data = repo.wwritedata(name, data)
209 209 archiver.addfile(name, mode, islink, data)
210 210
211 211 if kind not in archivers:
212 raise util.Abort(_("unknown archive type '%s'" % kind))
212 raise util.Abort(_("unknown archive type '%s'") % kind)
213 213
214 214 ctx = repo[node]
215 215 archiver = archivers[kind](dest, prefix, mtime or ctx.date()[0])
216 216
217 217 if repo.ui.configbool("ui", "archivemeta", True):
218 218 write('.hg_archival.txt', 0644, False,
219 219 lambda: 'repo: %s\nnode: %s\n' % (
220 220 hex(repo.changelog.node(0)), hex(node)))
221 221 for f in ctx:
222 222 ff = ctx.flags(f)
223 223 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
224 224 archiver.done()
@@ -1,289 +1,289 @@
1 1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os
10 10 from mercurial.i18n import gettext as _
11 11 from mercurial.repo import RepoError
12 12 from mercurial import ui, hg, util, templater, templatefilters
13 13 from common import ErrorResponse, get_mtime, staticfile, style_map, paritygen,\
14 14 get_contact, HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
15 15 from hgweb_mod import hgweb
16 16 from request import wsgirequest
17 17
18 18 # This is a stopgap
19 19 class hgwebdir(object):
20 20 def __init__(self, config, parentui=None):
21 21 def cleannames(items):
22 22 return [(util.pconvert(name).strip('/'), path)
23 23 for name, path in items]
24 24
25 25 self.parentui = parentui or ui.ui(report_untrusted=False,
26 26 interactive = False)
27 27 self.motd = None
28 28 self.style = None
29 29 self.stripecount = None
30 30 self.repos_sorted = ('name', False)
31 31 self._baseurl = None
32 32 if isinstance(config, (list, tuple)):
33 33 self.repos = cleannames(config)
34 34 self.repos_sorted = ('', False)
35 35 elif isinstance(config, dict):
36 36 self.repos = util.sort(cleannames(config.items()))
37 37 else:
38 38 if isinstance(config, util.configparser):
39 39 cp = config
40 40 else:
41 41 cp = util.configparser()
42 42 cp.read(config)
43 43 self.repos = []
44 44 if cp.has_section('web'):
45 45 if cp.has_option('web', 'motd'):
46 46 self.motd = cp.get('web', 'motd')
47 47 if cp.has_option('web', 'style'):
48 48 self.style = cp.get('web', 'style')
49 49 if cp.has_option('web', 'stripes'):
50 50 self.stripecount = int(cp.get('web', 'stripes'))
51 51 if cp.has_option('web', 'baseurl'):
52 52 self._baseurl = cp.get('web', 'baseurl')
53 53 if cp.has_section('paths'):
54 54 self.repos.extend(cleannames(cp.items('paths')))
55 55 if cp.has_section('collections'):
56 56 for prefix, root in cp.items('collections'):
57 57 for path in util.walkrepos(root, followsym=True):
58 58 repo = os.path.normpath(path)
59 59 name = repo
60 60 if name.startswith(prefix):
61 61 name = name[len(prefix):]
62 62 self.repos.append((name.lstrip(os.sep), repo))
63 63 self.repos.sort()
64 64
65 65 def run(self):
66 66 if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
67 67 raise RuntimeError("This function is only intended to be called while running as a CGI script.")
68 68 import mercurial.hgweb.wsgicgi as wsgicgi
69 69 wsgicgi.launch(self)
70 70
71 71 def __call__(self, env, respond):
72 72 req = wsgirequest(env, respond)
73 73 return self.run_wsgi(req)
74 74
75 75 def run_wsgi(self, req):
76 76
77 77 try:
78 78 try:
79 79
80 80 virtual = req.env.get("PATH_INFO", "").strip('/')
81 81 tmpl = self.templater(req)
82 82 ctype = tmpl('mimetype', encoding=util._encoding)
83 83 ctype = templater.stringify(ctype)
84 84
85 85 # a static file
86 86 if virtual.startswith('static/') or 'static' in req.form:
87 87 static = os.path.join(templater.templatepath(), 'static')
88 88 if virtual.startswith('static/'):
89 89 fname = virtual[7:]
90 90 else:
91 91 fname = req.form['static'][0]
92 92 req.write(staticfile(static, fname, req))
93 93 return []
94 94
95 95 # top-level index
96 96 elif not virtual:
97 97 req.respond(HTTP_OK, ctype)
98 98 req.write(self.makeindex(req, tmpl))
99 99 return []
100 100
101 101 # nested indexes and hgwebs
102 102
103 103 repos = dict(self.repos)
104 104 while virtual:
105 105 real = repos.get(virtual)
106 106 if real:
107 107 req.env['REPO_NAME'] = virtual
108 108 try:
109 109 repo = hg.repository(self.parentui, real)
110 110 return hgweb(repo).run_wsgi(req)
111 111 except IOError, inst:
112 112 msg = inst.strerror
113 113 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
114 114 except RepoError, inst:
115 115 raise ErrorResponse(HTTP_SERVER_ERROR, str(inst))
116 116
117 117 # browse subdirectories
118 118 subdir = virtual + '/'
119 119 if [r for r in repos if r.startswith(subdir)]:
120 120 req.respond(HTTP_OK, ctype)
121 121 req.write(self.makeindex(req, tmpl, subdir))
122 122 return []
123 123
124 124 up = virtual.rfind('/')
125 125 if up < 0:
126 126 break
127 127 virtual = virtual[:up]
128 128
129 129 # prefixes not found
130 130 req.respond(HTTP_NOT_FOUND, ctype)
131 131 req.write(tmpl("notfound", repo=virtual))
132 132 return []
133 133
134 134 except ErrorResponse, err:
135 135 req.respond(err.code, ctype)
136 136 req.write(tmpl('error', error=err.message or ''))
137 137 return []
138 138 finally:
139 139 tmpl = None
140 140
141 141 def makeindex(self, req, tmpl, subdir=""):
142 142
143 143 def archivelist(ui, nodeid, url):
144 144 allowed = ui.configlist("web", "allow_archive", untrusted=True)
145 145 for i in [('zip', '.zip'), ('gz', '.tar.gz'), ('bz2', '.tar.bz2')]:
146 146 if i[0] in allowed or ui.configbool("web", "allow" + i[0],
147 147 untrusted=True):
148 148 yield {"type" : i[0], "extension": i[1],
149 149 "node": nodeid, "url": url}
150 150
151 151 def entries(sortcolumn="", descending=False, subdir="", **map):
152 152 def sessionvars(**map):
153 153 fields = []
154 154 if 'style' in req.form:
155 155 style = req.form['style'][0]
156 156 if style != get('web', 'style', ''):
157 157 fields.append(('style', style))
158 158
159 159 separator = url[-1] == '?' and ';' or '?'
160 160 for name, value in fields:
161 161 yield dict(name=name, value=value, separator=separator)
162 162 separator = ';'
163 163
164 164 rows = []
165 165 parity = paritygen(self.stripecount)
166 166 for name, path in self.repos:
167 167 if not name.startswith(subdir):
168 168 continue
169 169 name = name[len(subdir):]
170 170
171 171 u = ui.ui(parentui=self.parentui)
172 172 try:
173 173 u.readconfig(os.path.join(path, '.hg', 'hgrc'))
174 174 except Exception, e:
175 u.warn(_('error reading %s/.hg/hgrc: %s\n' % (path, e)))
175 u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e))
176 176 continue
177 177 def get(section, name, default=None):
178 178 return u.config(section, name, default, untrusted=True)
179 179
180 180 if u.configbool("web", "hidden", untrusted=True):
181 181 continue
182 182
183 183 parts = [name]
184 184 if 'PATH_INFO' in req.env:
185 185 parts.insert(0, req.env['PATH_INFO'].rstrip('/'))
186 186 if req.env['SCRIPT_NAME']:
187 187 parts.insert(0, req.env['SCRIPT_NAME'])
188 188 url = ('/'.join(parts).replace("//", "/")) + '/'
189 189
190 190 # update time with local timezone
191 191 try:
192 192 d = (get_mtime(path), util.makedate()[1])
193 193 except OSError:
194 194 continue
195 195
196 196 contact = get_contact(get)
197 197 description = get("web", "description", "")
198 198 name = get("web", "name", name)
199 199 row = dict(contact=contact or "unknown",
200 200 contact_sort=contact.upper() or "unknown",
201 201 name=name,
202 202 name_sort=name,
203 203 url=url,
204 204 description=description or "unknown",
205 205 description_sort=description.upper() or "unknown",
206 206 lastchange=d,
207 207 lastchange_sort=d[1]-d[0],
208 208 sessionvars=sessionvars,
209 209 archives=archivelist(u, "tip", url))
210 210 if (not sortcolumn
211 211 or (sortcolumn, descending) == self.repos_sorted):
212 212 # fast path for unsorted output
213 213 row['parity'] = parity.next()
214 214 yield row
215 215 else:
216 216 rows.append((row["%s_sort" % sortcolumn], row))
217 217 if rows:
218 218 rows.sort()
219 219 if descending:
220 220 rows.reverse()
221 221 for key, row in rows:
222 222 row['parity'] = parity.next()
223 223 yield row
224 224
225 225 sortable = ["name", "description", "contact", "lastchange"]
226 226 sortcolumn, descending = self.repos_sorted
227 227 if 'sort' in req.form:
228 228 sortcolumn = req.form['sort'][0]
229 229 descending = sortcolumn.startswith('-')
230 230 if descending:
231 231 sortcolumn = sortcolumn[1:]
232 232 if sortcolumn not in sortable:
233 233 sortcolumn = ""
234 234
235 235 sort = [("sort_%s" % column,
236 236 "%s%s" % ((not descending and column == sortcolumn)
237 237 and "-" or "", column))
238 238 for column in sortable]
239 239
240 240 if self._baseurl is not None:
241 241 req.env['SCRIPT_NAME'] = self._baseurl
242 242
243 243 return tmpl("index", entries=entries, subdir=subdir,
244 244 sortcolumn=sortcolumn, descending=descending,
245 245 **dict(sort))
246 246
247 247 def templater(self, req):
248 248
249 249 def header(**map):
250 250 yield tmpl('header', encoding=util._encoding, **map)
251 251
252 252 def footer(**map):
253 253 yield tmpl("footer", **map)
254 254
255 255 def motd(**map):
256 256 if self.motd is not None:
257 257 yield self.motd
258 258 else:
259 259 yield config('web', 'motd', '')
260 260
261 261 def config(section, name, default=None, untrusted=True):
262 262 return self.parentui.config(section, name, default, untrusted)
263 263
264 264 if self._baseurl is not None:
265 265 req.env['SCRIPT_NAME'] = self._baseurl
266 266
267 267 url = req.env.get('SCRIPT_NAME', '')
268 268 if not url.endswith('/'):
269 269 url += '/'
270 270
271 271 staticurl = config('web', 'staticurl') or url + 'static/'
272 272 if not staticurl.endswith('/'):
273 273 staticurl += '/'
274 274
275 275 style = self.style
276 276 if style is None:
277 277 style = config('web', 'style', '')
278 278 if 'style' in req.form:
279 279 style = req.form['style'][0]
280 280 if self.stripecount is None:
281 281 self.stripecount = int(config('web', 'stripes', 1))
282 282 mapfile = style_map(templater.templatepath(), style)
283 283 tmpl = templater.templater(mapfile, templatefilters.filters,
284 284 defaults={"header": header,
285 285 "footer": footer,
286 286 "motd": motd,
287 287 "url": url,
288 288 "staticurl": staticurl})
289 289 return tmpl