##// END OF EJS Templates
errors: move revlog errors...
Matt Mackall -
r7633:08cabecf default
parent child Browse files
Show More
@@ -0,0 +1,26 b''
1 """
2 error.py - Mercurial exceptions
3
4 This allows us to catch exceptions at higher levels without forcing imports
5
6 Copyright 2005-2008 Matt Mackall <mpm@selenic.com>
7
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
10 """
11
12 # Do not import anything here, please
13
14 class RevlogError(Exception):
15 pass
16
17 class LookupError(RevlogError, KeyError):
18 def __init__(self, name, index, message):
19 self.name = name
20 if isinstance(name, str) and len(name) == 20:
21 from node import short
22 name = short(name)
23 RevlogError.__init__(self, '%s@%s: %s' % (index, name, message))
24
25 def __str__(self):
26 return RevlogError.__str__(self)
@@ -1,336 +1,336 b''
1 1 # hg backend for convert extension
2 2
3 3 # Notes for hg->hg conversion:
4 4 #
5 5 # * Old versions of Mercurial didn't trim the whitespace from the ends
6 6 # of commit messages, but new versions do. Changesets created by
7 7 # those older versions, then converted, may thus have different
8 8 # hashes for changesets that are otherwise identical.
9 9 #
10 10 # * By default, the source revision is stored in the converted
11 11 # revision. This will cause the converted revision to have a
12 12 # different identity than the source. To avoid this, use the
13 13 # following option: "--config convert.hg.saverev=false"
14 14
15 15
16 16 import os, time
17 17 from mercurial.i18n import _
18 18 from mercurial.repo import RepoError
19 19 from mercurial.node import bin, hex, nullid
20 from mercurial import hg, revlog, util, context
20 from mercurial import hg, util, context, error
21 21
22 22 from common import NoRepo, commit, converter_source, converter_sink
23 23
24 24 class mercurial_sink(converter_sink):
25 25 def __init__(self, ui, path):
26 26 converter_sink.__init__(self, ui, path)
27 27 self.branchnames = ui.configbool('convert', 'hg.usebranchnames', True)
28 28 self.clonebranches = ui.configbool('convert', 'hg.clonebranches', False)
29 29 self.tagsbranch = ui.config('convert', 'hg.tagsbranch', 'default')
30 30 self.lastbranch = None
31 31 if os.path.isdir(path) and len(os.listdir(path)) > 0:
32 32 try:
33 33 self.repo = hg.repository(self.ui, path)
34 34 if not self.repo.local():
35 35 raise NoRepo(_('%s is not a local Mercurial repo') % path)
36 36 except RepoError, err:
37 37 ui.print_exc()
38 38 raise NoRepo(err.args[0])
39 39 else:
40 40 try:
41 41 ui.status(_('initializing destination %s repository\n') % path)
42 42 self.repo = hg.repository(self.ui, path, create=True)
43 43 if not self.repo.local():
44 44 raise NoRepo(_('%s is not a local Mercurial repo') % path)
45 45 self.created.append(path)
46 46 except RepoError, err:
47 47 ui.print_exc()
48 48 raise NoRepo("could not create hg repo %s as sink" % path)
49 49 self.lock = None
50 50 self.wlock = None
51 51 self.filemapmode = False
52 52
53 53 def before(self):
54 54 self.ui.debug(_('run hg sink pre-conversion action\n'))
55 55 self.wlock = self.repo.wlock()
56 56 self.lock = self.repo.lock()
57 57
58 58 def after(self):
59 59 self.ui.debug(_('run hg sink post-conversion action\n'))
60 60 self.lock = None
61 61 self.wlock = None
62 62
63 63 def revmapfile(self):
64 64 return os.path.join(self.path, ".hg", "shamap")
65 65
66 66 def authorfile(self):
67 67 return os.path.join(self.path, ".hg", "authormap")
68 68
69 69 def getheads(self):
70 70 h = self.repo.changelog.heads()
71 71 return [ hex(x) for x in h ]
72 72
73 73 def setbranch(self, branch, pbranches):
74 74 if not self.clonebranches:
75 75 return
76 76
77 77 setbranch = (branch != self.lastbranch)
78 78 self.lastbranch = branch
79 79 if not branch:
80 80 branch = 'default'
81 81 pbranches = [(b[0], b[1] and b[1] or 'default') for b in pbranches]
82 82 pbranch = pbranches and pbranches[0][1] or 'default'
83 83
84 84 branchpath = os.path.join(self.path, branch)
85 85 if setbranch:
86 86 self.after()
87 87 try:
88 88 self.repo = hg.repository(self.ui, branchpath)
89 89 except:
90 90 self.repo = hg.repository(self.ui, branchpath, create=True)
91 91 self.before()
92 92
93 93 # pbranches may bring revisions from other branches (merge parents)
94 94 # Make sure we have them, or pull them.
95 95 missings = {}
96 96 for b in pbranches:
97 97 try:
98 98 self.repo.lookup(b[0])
99 99 except:
100 100 missings.setdefault(b[1], []).append(b[0])
101 101
102 102 if missings:
103 103 self.after()
104 104 for pbranch, heads in missings.iteritems():
105 105 pbranchpath = os.path.join(self.path, pbranch)
106 106 prepo = hg.repository(self.ui, pbranchpath)
107 107 self.ui.note(_('pulling from %s into %s\n') % (pbranch, branch))
108 108 self.repo.pull(prepo, [prepo.lookup(h) for h in heads])
109 109 self.before()
110 110
111 111 def putcommit(self, files, copies, parents, commit, source):
112 112
113 113 files = dict(files)
114 114 def getfilectx(repo, memctx, f):
115 115 v = files[f]
116 116 data = source.getfile(f, v)
117 117 e = source.getmode(f, v)
118 118 return context.memfilectx(f, data, 'l' in e, 'x' in e, copies.get(f))
119 119
120 120 pl = []
121 121 for p in parents:
122 122 if p not in pl:
123 123 pl.append(p)
124 124 parents = pl
125 125 nparents = len(parents)
126 126 if self.filemapmode and nparents == 1:
127 127 m1node = self.repo.changelog.read(bin(parents[0]))[0]
128 128 parent = parents[0]
129 129
130 130 if len(parents) < 2: parents.append("0" * 40)
131 131 if len(parents) < 2: parents.append("0" * 40)
132 132 p2 = parents.pop(0)
133 133
134 134 text = commit.desc
135 135 extra = commit.extra.copy()
136 136 if self.branchnames and commit.branch:
137 137 extra['branch'] = commit.branch
138 138 if commit.rev:
139 139 extra['convert_revision'] = commit.rev
140 140
141 141 while parents:
142 142 p1 = p2
143 143 p2 = parents.pop(0)
144 144 ctx = context.memctx(self.repo, (p1, p2), text, files.keys(), getfilectx,
145 145 commit.author, commit.date, extra)
146 146 a = self.repo.commitctx(ctx)
147 147 text = "(octopus merge fixup)\n"
148 148 p2 = hex(self.repo.changelog.tip())
149 149
150 150 if self.filemapmode and nparents == 1:
151 151 man = self.repo.manifest
152 152 mnode = self.repo.changelog.read(bin(p2))[0]
153 153 if not man.cmp(m1node, man.revision(mnode)):
154 154 self.repo.rollback()
155 155 return parent
156 156 return p2
157 157
158 158 def puttags(self, tags):
159 159 try:
160 160 parentctx = self.repo[self.tagsbranch]
161 161 tagparent = parentctx.node()
162 162 except RepoError, inst:
163 163 parentctx = None
164 164 tagparent = nullid
165 165
166 166 try:
167 167 oldlines = util.sort(parentctx['.hgtags'].data().splitlines(1))
168 168 except:
169 169 oldlines = []
170 170
171 171 newlines = util.sort([("%s %s\n" % (tags[tag], tag)) for tag in tags])
172 172
173 173 if newlines == oldlines:
174 174 return None
175 175 data = "".join(newlines)
176 176
177 177 def getfilectx(repo, memctx, f):
178 178 return context.memfilectx(f, data, False, False, None)
179 179
180 180 self.ui.status(_("updating tags\n"))
181 181 date = "%s 0" % int(time.mktime(time.gmtime()))
182 182 extra = {'branch': self.tagsbranch}
183 183 ctx = context.memctx(self.repo, (tagparent, None), "update tags",
184 184 [".hgtags"], getfilectx, "convert-repo", date,
185 185 extra)
186 186 self.repo.commitctx(ctx)
187 187 return hex(self.repo.changelog.tip())
188 188
189 189 def setfilemapmode(self, active):
190 190 self.filemapmode = active
191 191
192 192 class mercurial_source(converter_source):
193 193 def __init__(self, ui, path, rev=None):
194 194 converter_source.__init__(self, ui, path, rev)
195 195 self.ignoreerrors = ui.configbool('convert', 'hg.ignoreerrors', False)
196 196 self.ignored = {}
197 197 self.saverev = ui.configbool('convert', 'hg.saverev', True)
198 198 try:
199 199 self.repo = hg.repository(self.ui, path)
200 200 # try to provoke an exception if this isn't really a hg
201 201 # repo, but some other bogus compatible-looking url
202 202 if not self.repo.local():
203 203 raise RepoError()
204 204 except RepoError:
205 205 ui.print_exc()
206 206 raise NoRepo("%s is not a local Mercurial repo" % path)
207 207 self.lastrev = None
208 208 self.lastctx = None
209 209 self._changescache = None
210 210 self.convertfp = None
211 211 # Restrict converted revisions to startrev descendants
212 212 startnode = ui.config('convert', 'hg.startrev')
213 213 if startnode is not None:
214 214 try:
215 215 startnode = self.repo.lookup(startnode)
216 216 except repo.RepoError:
217 217 raise util.Abort(_('%s is not a valid start revision')
218 218 % startnode)
219 219 startrev = self.repo.changelog.rev(startnode)
220 220 children = {startnode: 1}
221 221 for rev in self.repo.changelog.descendants(startrev):
222 222 children[self.repo.changelog.node(rev)] = 1
223 223 self.keep = children.__contains__
224 224 else:
225 225 self.keep = util.always
226 226
227 227 def changectx(self, rev):
228 228 if self.lastrev != rev:
229 229 self.lastctx = self.repo[rev]
230 230 self.lastrev = rev
231 231 return self.lastctx
232 232
233 233 def parents(self, ctx):
234 234 return [p.node() for p in ctx.parents()
235 235 if p and self.keep(p.node())]
236 236
237 237 def getheads(self):
238 238 if self.rev:
239 239 heads = [self.repo[self.rev].node()]
240 240 else:
241 241 heads = self.repo.heads()
242 242 return [hex(h) for h in heads if self.keep(h)]
243 243
244 244 def getfile(self, name, rev):
245 245 try:
246 246 return self.changectx(rev)[name].data()
247 except revlog.LookupError, err:
247 except error.LookupError, err:
248 248 raise IOError(err)
249 249
250 250 def getmode(self, name, rev):
251 251 return self.changectx(rev).manifest().flags(name)
252 252
253 253 def getchanges(self, rev):
254 254 ctx = self.changectx(rev)
255 255 parents = self.parents(ctx)
256 256 if not parents:
257 257 files = util.sort(ctx.manifest().keys())
258 258 if self.ignoreerrors:
259 259 # calling getcopies() is a simple way to detect missing
260 260 # revlogs and populate self.ignored
261 261 self.getcopies(ctx, files)
262 262 return [(f, rev) for f in files if f not in self.ignored], {}
263 263 if self._changescache and self._changescache[0] == rev:
264 264 m, a, r = self._changescache[1]
265 265 else:
266 266 m, a, r = self.repo.status(parents[0], ctx.node())[:3]
267 267 # getcopies() detects missing revlogs early, run it before
268 268 # filtering the changes.
269 269 copies = self.getcopies(ctx, m + a)
270 270 changes = [(name, rev) for name in m + a + r
271 271 if name not in self.ignored]
272 272 return util.sort(changes), copies
273 273
274 274 def getcopies(self, ctx, files):
275 275 copies = {}
276 276 for name in files:
277 277 if name in self.ignored:
278 278 continue
279 279 try:
280 280 copysource, copynode = ctx.filectx(name).renamed()
281 281 if copysource in self.ignored or not self.keep(copynode):
282 282 continue
283 283 copies[name] = copysource
284 284 except TypeError:
285 285 pass
286 except revlog.LookupError, e:
286 except error.LookupError, e:
287 287 if not self.ignoreerrors:
288 288 raise
289 289 self.ignored[name] = 1
290 290 self.ui.warn(_('ignoring: %s\n') % e)
291 291 return copies
292 292
293 293 def getcommit(self, rev):
294 294 ctx = self.changectx(rev)
295 295 parents = [hex(p) for p in self.parents(ctx)]
296 296 if self.saverev:
297 297 crev = rev
298 298 else:
299 299 crev = None
300 300 return commit(author=ctx.user(), date=util.datestr(ctx.date()),
301 301 desc=ctx.description(), rev=crev, parents=parents,
302 302 branch=ctx.branch(), extra=ctx.extra())
303 303
304 304 def gettags(self):
305 305 tags = [t for t in self.repo.tagslist() if t[0] != 'tip']
306 306 return dict([(name, hex(node)) for name, node in tags
307 307 if self.keep(node)])
308 308
309 309 def getchangedfiles(self, rev, i):
310 310 ctx = self.changectx(rev)
311 311 parents = self.parents(ctx)
312 312 if not parents and i is None:
313 313 i = 0
314 314 changes = [], ctx.manifest().keys(), []
315 315 else:
316 316 i = i or 0
317 317 changes = self.repo.status(parents[i], ctx.node())[:3]
318 318 changes = [[f for f in l if f not in self.ignored] for l in changes]
319 319
320 320 if i == 0:
321 321 self._changescache = (rev, changes)
322 322
323 323 return changes[0] + changes[1] + changes[2]
324 324
325 325 def converted(self, rev, destrev):
326 326 if self.convertfp is None:
327 327 self.convertfp = open(os.path.join(self.path, '.hg', 'shamap'),
328 328 'a')
329 329 self.convertfp.write('%s %s\n' % (destrev, rev))
330 330 self.convertfp.flush()
331 331
332 332 def before(self):
333 333 self.ui.debug(_('run hg source pre-conversion action\n'))
334 334
335 335 def after(self):
336 336 self.ui.debug(_('run hg source post-conversion action\n'))
@@ -1,407 +1,407 b''
1 1 # Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
2 2 # Published under the GNU GPL
3 3
4 4 '''
5 5 imerge - interactive merge
6 6 '''
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex, short
10 10 from mercurial import commands, cmdutil, dispatch, fancyopts
11 from mercurial import hg, filemerge, util, revlog
11 from mercurial import hg, filemerge, util, error
12 12 import os, tarfile
13 13
14 14 class InvalidStateFileException(Exception): pass
15 15
16 16 class ImergeStateFile(object):
17 17 def __init__(self, im):
18 18 self.im = im
19 19
20 20 def save(self, dest):
21 21 tf = tarfile.open(dest, 'w:gz')
22 22
23 23 st = os.path.join(self.im.path, 'status')
24 24 tf.add(st, os.path.join('.hg', 'imerge', 'status'))
25 25
26 26 for f in self.im.resolved:
27 27 (fd, fo) = self.im.conflicts[f]
28 28 abssrc = self.im.repo.wjoin(fd)
29 29 tf.add(abssrc, fd)
30 30
31 31 tf.close()
32 32
33 33 def load(self, source):
34 34 wlock = self.im.repo.wlock()
35 35 lock = self.im.repo.lock()
36 36
37 37 tf = tarfile.open(source, 'r')
38 38 contents = tf.getnames()
39 39 # tarfile normalizes path separators to '/'
40 40 statusfile = '.hg/imerge/status'
41 41 if statusfile not in contents:
42 42 raise InvalidStateFileException('no status file')
43 43
44 44 tf.extract(statusfile, self.im.repo.root)
45 45 p1, p2 = self.im.load()
46 46 if self.im.repo.dirstate.parents()[0] != p1.node():
47 47 hg.clean(self.im.repo, p1.node())
48 48 self.im.start(p2.node())
49 49 for tarinfo in tf:
50 50 tf.extract(tarinfo, self.im.repo.root)
51 51 self.im.load()
52 52
53 53 class Imerge(object):
54 54 def __init__(self, ui, repo):
55 55 self.ui = ui
56 56 self.repo = repo
57 57
58 58 self.path = repo.join('imerge')
59 59 self.opener = util.opener(self.path)
60 60
61 61 self.wctx = self.repo.workingctx()
62 62 self.conflicts = {}
63 63 self.resolved = []
64 64
65 65 def merging(self):
66 66 return len(self.wctx.parents()) > 1
67 67
68 68 def load(self):
69 69 # status format. \0-delimited file, fields are
70 70 # p1, p2, conflict count, conflict filenames, resolved filenames
71 71 # conflict filenames are tuples of localname, remoteorig, remotenew
72 72
73 73 statusfile = self.opener('status')
74 74
75 75 status = statusfile.read().split('\0')
76 76 if len(status) < 3:
77 77 raise util.Abort(_('invalid imerge status file'))
78 78
79 79 try:
80 80 parents = [self.repo.changectx(n) for n in status[:2]]
81 except revlog.LookupError, e:
81 except error.LookupError, e:
82 82 raise util.Abort(_('merge parent %s not in repository') %
83 83 short(e.name))
84 84
85 85 status = status[2:]
86 86 conflicts = int(status.pop(0)) * 3
87 87 self.resolved = status[conflicts:]
88 88 for i in xrange(0, conflicts, 3):
89 89 self.conflicts[status[i]] = (status[i+1], status[i+2])
90 90
91 91 return parents
92 92
93 93 def save(self):
94 94 lock = self.repo.lock()
95 95
96 96 if not os.path.isdir(self.path):
97 97 os.mkdir(self.path)
98 98 statusfile = self.opener('status', 'wb')
99 99
100 100 out = [hex(n.node()) for n in self.wctx.parents()]
101 101 out.append(str(len(self.conflicts)))
102 102 conflicts = self.conflicts.items()
103 103 conflicts.sort()
104 104 for fw, fd_fo in conflicts:
105 105 out.append(fw)
106 106 out.extend(fd_fo)
107 107 out.extend(self.resolved)
108 108
109 109 statusfile.write('\0'.join(out))
110 110
111 111 def remaining(self):
112 112 return [f for f in self.conflicts if f not in self.resolved]
113 113
114 114 def filemerge(self, fn, interactive=True):
115 115 wlock = self.repo.wlock()
116 116
117 117 (fd, fo) = self.conflicts[fn]
118 118 p1, p2 = self.wctx.parents()
119 119
120 120 # this could be greatly improved
121 121 realmerge = os.environ.get('HGMERGE')
122 122 if not interactive:
123 123 os.environ['HGMERGE'] = 'internal:merge'
124 124
125 125 # The filemerge ancestor algorithm does not work if self.wctx
126 126 # already has two parents (in normal merge it doesn't yet). But
127 127 # this is very dirty.
128 128 self.wctx._parents.pop()
129 129 try:
130 130 # TODO: we should probably revert the file if merge fails
131 131 return filemerge.filemerge(self.repo, fn, fd, fo, self.wctx, p2)
132 132 finally:
133 133 self.wctx._parents.append(p2)
134 134 if realmerge:
135 135 os.environ['HGMERGE'] = realmerge
136 136 elif not interactive:
137 137 del os.environ['HGMERGE']
138 138
139 139 def start(self, rev=None):
140 140 _filemerge = filemerge.filemerge
141 141 def filemerge_(repo, fw, fd, fo, wctx, mctx):
142 142 self.conflicts[fw] = (fd, fo)
143 143
144 144 filemerge.filemerge = filemerge_
145 145 commands.merge(self.ui, self.repo, rev=rev)
146 146 filemerge.filemerge = _filemerge
147 147
148 148 self.wctx = self.repo.workingctx()
149 149 self.save()
150 150
151 151 def resume(self):
152 152 self.load()
153 153
154 154 dp = self.repo.dirstate.parents()
155 155 p1, p2 = self.wctx.parents()
156 156 if p1.node() != dp[0] or p2.node() != dp[1]:
157 157 raise util.Abort(_('imerge state does not match working directory'))
158 158
159 159 def next(self):
160 160 remaining = self.remaining()
161 161 return remaining and remaining[0]
162 162
163 163 def resolve(self, files):
164 164 resolved = dict.fromkeys(self.resolved)
165 165 for fn in files:
166 166 if fn not in self.conflicts:
167 167 raise util.Abort(_('%s is not in the merge set') % fn)
168 168 resolved[fn] = True
169 169 self.resolved = resolved.keys()
170 170 self.resolved.sort()
171 171 self.save()
172 172 return 0
173 173
174 174 def unresolve(self, files):
175 175 resolved = dict.fromkeys(self.resolved)
176 176 for fn in files:
177 177 if fn not in resolved:
178 178 raise util.Abort(_('%s is not resolved') % fn)
179 179 del resolved[fn]
180 180 self.resolved = resolved.keys()
181 181 self.resolved.sort()
182 182 self.save()
183 183 return 0
184 184
185 185 def pickle(self, dest):
186 186 '''write current merge state to file to be resumed elsewhere'''
187 187 state = ImergeStateFile(self)
188 188 return state.save(dest)
189 189
190 190 def unpickle(self, source):
191 191 '''read merge state from file'''
192 192 state = ImergeStateFile(self)
193 193 return state.load(source)
194 194
195 195 def load(im, source):
196 196 if im.merging():
197 197 raise util.Abort(_('there is already a merge in progress '
198 198 '(update -C <rev> to abort it)'))
199 199 m, a, r, d = im.repo.status()[:4]
200 200 if m or a or r or d:
201 201 raise util.Abort(_('working directory has uncommitted changes'))
202 202
203 203 rc = im.unpickle(source)
204 204 if not rc:
205 205 status(im)
206 206 return rc
207 207
208 208 def merge_(im, filename=None, auto=False):
209 209 success = True
210 210 if auto and not filename:
211 211 for fn in im.remaining():
212 212 rc = im.filemerge(fn, interactive=False)
213 213 if rc:
214 214 success = False
215 215 else:
216 216 im.resolve([fn])
217 217 if success:
218 218 im.ui.write('all conflicts resolved\n')
219 219 else:
220 220 status(im)
221 221 return 0
222 222
223 223 if not filename:
224 224 filename = im.next()
225 225 if not filename:
226 226 im.ui.write('all conflicts resolved\n')
227 227 return 0
228 228
229 229 rc = im.filemerge(filename, interactive=not auto)
230 230 if not rc:
231 231 im.resolve([filename])
232 232 if not im.next():
233 233 im.ui.write('all conflicts resolved\n')
234 234 return rc
235 235
236 236 def next(im):
237 237 n = im.next()
238 238 if n:
239 239 im.ui.write('%s\n' % n)
240 240 else:
241 241 im.ui.write('all conflicts resolved\n')
242 242 return 0
243 243
244 244 def resolve(im, *files):
245 245 if not files:
246 246 raise util.Abort(_('resolve requires at least one filename'))
247 247 return im.resolve(files)
248 248
249 249 def save(im, dest):
250 250 return im.pickle(dest)
251 251
252 252 def status(im, **opts):
253 253 if not opts.get('resolved') and not opts.get('unresolved'):
254 254 opts['resolved'] = True
255 255 opts['unresolved'] = True
256 256
257 257 if im.ui.verbose:
258 258 p1, p2 = [short(p.node()) for p in im.wctx.parents()]
259 259 im.ui.note(_('merging %s and %s\n') % (p1, p2))
260 260
261 261 conflicts = im.conflicts.keys()
262 262 conflicts.sort()
263 263 remaining = dict.fromkeys(im.remaining())
264 264 st = []
265 265 for fn in conflicts:
266 266 if opts.get('no_status'):
267 267 mode = ''
268 268 elif fn in remaining:
269 269 mode = 'U '
270 270 else:
271 271 mode = 'R '
272 272 if ((opts.get('resolved') and fn not in remaining)
273 273 or (opts.get('unresolved') and fn in remaining)):
274 274 st.append((mode, fn))
275 275 st.sort()
276 276 for (mode, fn) in st:
277 277 if im.ui.verbose:
278 278 fo, fd = im.conflicts[fn]
279 279 if fd != fn:
280 280 fn = '%s (%s)' % (fn, fd)
281 281 im.ui.write('%s%s\n' % (mode, fn))
282 282 if opts.get('unresolved') and not remaining:
283 283 im.ui.write(_('all conflicts resolved\n'))
284 284
285 285 return 0
286 286
287 287 def unresolve(im, *files):
288 288 if not files:
289 289 raise util.Abort(_('unresolve requires at least one filename'))
290 290 return im.unresolve(files)
291 291
292 292 subcmdtable = {
293 293 'load': (load, []),
294 294 'merge':
295 295 (merge_,
296 296 [('a', 'auto', None, _('automatically resolve if possible'))]),
297 297 'next': (next, []),
298 298 'resolve': (resolve, []),
299 299 'save': (save, []),
300 300 'status':
301 301 (status,
302 302 [('n', 'no-status', None, _('hide status prefix')),
303 303 ('', 'resolved', None, _('only show resolved conflicts')),
304 304 ('', 'unresolved', None, _('only show unresolved conflicts'))]),
305 305 'unresolve': (unresolve, [])
306 306 }
307 307
308 308 def dispatch_(im, args, opts):
309 309 def complete(s, choices):
310 310 candidates = []
311 311 for choice in choices:
312 312 if choice.startswith(s):
313 313 candidates.append(choice)
314 314 return candidates
315 315
316 316 c, args = args[0], list(args[1:])
317 317 cmd = complete(c, subcmdtable.keys())
318 318 if not cmd:
319 319 raise cmdutil.UnknownCommand('imerge ' + c)
320 320 if len(cmd) > 1:
321 321 cmd.sort()
322 322 raise cmdutil.AmbiguousCommand('imerge ' + c, cmd)
323 323 cmd = cmd[0]
324 324
325 325 func, optlist = subcmdtable[cmd]
326 326 opts = {}
327 327 try:
328 328 args = fancyopts.fancyopts(args, optlist, opts)
329 329 return func(im, *args, **opts)
330 330 except fancyopts.getopt.GetoptError, inst:
331 331 raise dispatch.ParseError('imerge', '%s: %s' % (cmd, inst))
332 332 except TypeError:
333 333 raise dispatch.ParseError('imerge', _('%s: invalid arguments') % cmd)
334 334
335 335 def imerge(ui, repo, *args, **opts):
336 336 '''interactive merge
337 337
338 338 imerge lets you split a merge into pieces. When you start a merge
339 339 with imerge, the names of all files with conflicts are recorded.
340 340 You can then merge any of these files, and if the merge is
341 341 successful, they will be marked as resolved. When all files are
342 342 resolved, the merge is complete.
343 343
344 344 If no merge is in progress, hg imerge [rev] will merge the working
345 345 directory with rev (defaulting to the other head if the repository
346 346 only has two heads). You may also resume a saved merge with
347 347 hg imerge load <file>.
348 348
349 349 If a merge is in progress, hg imerge will default to merging the
350 350 next unresolved file.
351 351
352 352 The following subcommands are available:
353 353
354 354 status:
355 355 show the current state of the merge
356 356 options:
357 357 -n --no-status: do not print the status prefix
358 358 --resolved: only print resolved conflicts
359 359 --unresolved: only print unresolved conflicts
360 360 next:
361 361 show the next unresolved file merge
362 362 merge [<file>]:
363 363 merge <file>. If the file merge is successful, the file will be
364 364 recorded as resolved. If no file is given, the next unresolved
365 365 file will be merged.
366 366 resolve <file>...:
367 367 mark files as successfully merged
368 368 unresolve <file>...:
369 369 mark files as requiring merging.
370 370 save <file>:
371 371 save the state of the merge to a file to be resumed elsewhere
372 372 load <file>:
373 373 load the state of the merge from a file created by save
374 374 '''
375 375
376 376 im = Imerge(ui, repo)
377 377
378 378 if im.merging():
379 379 im.resume()
380 380 else:
381 381 rev = opts.get('rev')
382 382 if rev and args:
383 383 raise util.Abort(_('please specify just one revision'))
384 384
385 385 if len(args) == 2 and args[0] == 'load':
386 386 pass
387 387 else:
388 388 if args:
389 389 rev = args[0]
390 390 im.start(rev=rev)
391 391 if opts.get('auto'):
392 392 args = ['merge', '--auto']
393 393 else:
394 394 args = ['status']
395 395
396 396 if not args:
397 397 args = ['merge']
398 398
399 399 return dispatch_(im, args, opts)
400 400
401 401 cmdtable = {
402 402 '^imerge':
403 403 (imerge,
404 404 [('r', 'rev', '', _('revision to merge')),
405 405 ('a', 'auto', None, _('automatically merge where possible'))],
406 406 _('hg imerge [command]'))
407 407 }
@@ -1,587 +1,587 b''
1 1 # Patch transplanting extension for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch transplanting tool
9 9
10 10 This extension allows you to transplant patches from another branch.
11 11
12 12 Transplanted patches are recorded in .hg/transplant/transplants, as a map
13 13 from a changeset hash to its hash in the source repository.
14 14 '''
15 15
16 16 from mercurial.i18n import _
17 17 import os, tempfile
18 18 from mercurial import bundlerepo, changegroup, cmdutil, hg, merge
19 from mercurial import patch, revlog, util
19 from mercurial import patch, revlog, util, error
20 20
21 21 class transplantentry:
22 22 def __init__(self, lnode, rnode):
23 23 self.lnode = lnode
24 24 self.rnode = rnode
25 25
26 26 class transplants:
27 27 def __init__(self, path=None, transplantfile=None, opener=None):
28 28 self.path = path
29 29 self.transplantfile = transplantfile
30 30 self.opener = opener
31 31
32 32 if not opener:
33 33 self.opener = util.opener(self.path)
34 34 self.transplants = []
35 35 self.dirty = False
36 36 self.read()
37 37
38 38 def read(self):
39 39 abspath = os.path.join(self.path, self.transplantfile)
40 40 if self.transplantfile and os.path.exists(abspath):
41 41 for line in self.opener(self.transplantfile).read().splitlines():
42 42 lnode, rnode = map(revlog.bin, line.split(':'))
43 43 self.transplants.append(transplantentry(lnode, rnode))
44 44
45 45 def write(self):
46 46 if self.dirty and self.transplantfile:
47 47 if not os.path.isdir(self.path):
48 48 os.mkdir(self.path)
49 49 fp = self.opener(self.transplantfile, 'w')
50 50 for c in self.transplants:
51 51 l, r = map(revlog.hex, (c.lnode, c.rnode))
52 52 fp.write(l + ':' + r + '\n')
53 53 fp.close()
54 54 self.dirty = False
55 55
56 56 def get(self, rnode):
57 57 return [t for t in self.transplants if t.rnode == rnode]
58 58
59 59 def set(self, lnode, rnode):
60 60 self.transplants.append(transplantentry(lnode, rnode))
61 61 self.dirty = True
62 62
63 63 def remove(self, transplant):
64 64 del self.transplants[self.transplants.index(transplant)]
65 65 self.dirty = True
66 66
67 67 class transplanter:
68 68 def __init__(self, ui, repo):
69 69 self.ui = ui
70 70 self.path = repo.join('transplant')
71 71 self.opener = util.opener(self.path)
72 72 self.transplants = transplants(self.path, 'transplants', opener=self.opener)
73 73
74 74 def applied(self, repo, node, parent):
75 75 '''returns True if a node is already an ancestor of parent
76 76 or has already been transplanted'''
77 77 if hasnode(repo, node):
78 78 if node in repo.changelog.reachable(parent, stop=node):
79 79 return True
80 80 for t in self.transplants.get(node):
81 81 # it might have been stripped
82 82 if not hasnode(repo, t.lnode):
83 83 self.transplants.remove(t)
84 84 return False
85 85 if t.lnode in repo.changelog.reachable(parent, stop=t.lnode):
86 86 return True
87 87 return False
88 88
89 89 def apply(self, repo, source, revmap, merges, opts={}):
90 90 '''apply the revisions in revmap one by one in revision order'''
91 91 revs = util.sort(revmap)
92 92 p1, p2 = repo.dirstate.parents()
93 93 pulls = []
94 94 diffopts = patch.diffopts(self.ui, opts)
95 95 diffopts.git = True
96 96
97 97 lock = wlock = None
98 98 try:
99 99 wlock = repo.wlock()
100 100 lock = repo.lock()
101 101 for rev in revs:
102 102 node = revmap[rev]
103 103 revstr = '%s:%s' % (rev, revlog.short(node))
104 104
105 105 if self.applied(repo, node, p1):
106 106 self.ui.warn(_('skipping already applied revision %s\n') %
107 107 revstr)
108 108 continue
109 109
110 110 parents = source.changelog.parents(node)
111 111 if not opts.get('filter'):
112 112 # If the changeset parent is the same as the wdir's parent,
113 113 # just pull it.
114 114 if parents[0] == p1:
115 115 pulls.append(node)
116 116 p1 = node
117 117 continue
118 118 if pulls:
119 119 if source != repo:
120 120 repo.pull(source, heads=pulls)
121 121 merge.update(repo, pulls[-1], False, False, None)
122 122 p1, p2 = repo.dirstate.parents()
123 123 pulls = []
124 124
125 125 domerge = False
126 126 if node in merges:
127 127 # pulling all the merge revs at once would mean we couldn't
128 128 # transplant after the latest even if transplants before them
129 129 # fail.
130 130 domerge = True
131 131 if not hasnode(repo, node):
132 132 repo.pull(source, heads=[node])
133 133
134 134 if parents[1] != revlog.nullid:
135 135 self.ui.note(_('skipping merge changeset %s:%s\n')
136 136 % (rev, revlog.short(node)))
137 137 patchfile = None
138 138 else:
139 139 fd, patchfile = tempfile.mkstemp(prefix='hg-transplant-')
140 140 fp = os.fdopen(fd, 'w')
141 141 gen = patch.diff(source, parents[0], node, opts=diffopts)
142 142 for chunk in gen:
143 143 fp.write(chunk)
144 144 fp.close()
145 145
146 146 del revmap[rev]
147 147 if patchfile or domerge:
148 148 try:
149 149 n = self.applyone(repo, node,
150 150 source.changelog.read(node),
151 151 patchfile, merge=domerge,
152 152 log=opts.get('log'),
153 153 filter=opts.get('filter'))
154 154 if n and domerge:
155 155 self.ui.status(_('%s merged at %s\n') % (revstr,
156 156 revlog.short(n)))
157 157 elif n:
158 158 self.ui.status(_('%s transplanted to %s\n') % (revlog.short(node),
159 159 revlog.short(n)))
160 160 finally:
161 161 if patchfile:
162 162 os.unlink(patchfile)
163 163 if pulls:
164 164 repo.pull(source, heads=pulls)
165 165 merge.update(repo, pulls[-1], False, False, None)
166 166 finally:
167 167 self.saveseries(revmap, merges)
168 168 self.transplants.write()
169 169 del lock, wlock
170 170
171 171 def filter(self, filter, changelog, patchfile):
172 172 '''arbitrarily rewrite changeset before applying it'''
173 173
174 174 self.ui.status(_('filtering %s\n') % patchfile)
175 175 user, date, msg = (changelog[1], changelog[2], changelog[4])
176 176
177 177 fd, headerfile = tempfile.mkstemp(prefix='hg-transplant-')
178 178 fp = os.fdopen(fd, 'w')
179 179 fp.write("# HG changeset patch\n")
180 180 fp.write("# User %s\n" % user)
181 181 fp.write("# Date %d %d\n" % date)
182 182 fp.write(changelog[4])
183 183 fp.close()
184 184
185 185 try:
186 186 util.system('%s %s %s' % (filter, util.shellquote(headerfile),
187 187 util.shellquote(patchfile)),
188 188 environ={'HGUSER': changelog[1]},
189 189 onerr=util.Abort, errprefix=_('filter failed'))
190 190 user, date, msg = self.parselog(file(headerfile))[1:4]
191 191 finally:
192 192 os.unlink(headerfile)
193 193
194 194 return (user, date, msg)
195 195
196 196 def applyone(self, repo, node, cl, patchfile, merge=False, log=False,
197 197 filter=None):
198 198 '''apply the patch in patchfile to the repository as a transplant'''
199 199 (manifest, user, (time, timezone), files, message) = cl[:5]
200 200 date = "%d %d" % (time, timezone)
201 201 extra = {'transplant_source': node}
202 202 if filter:
203 203 (user, date, message) = self.filter(filter, cl, patchfile)
204 204
205 205 if log:
206 206 message += '\n(transplanted from %s)' % revlog.hex(node)
207 207
208 208 self.ui.status(_('applying %s\n') % revlog.short(node))
209 209 self.ui.note('%s %s\n%s\n' % (user, date, message))
210 210
211 211 if not patchfile and not merge:
212 212 raise util.Abort(_('can only omit patchfile if merging'))
213 213 if patchfile:
214 214 try:
215 215 files = {}
216 216 try:
217 217 fuzz = patch.patch(patchfile, self.ui, cwd=repo.root,
218 218 files=files)
219 219 if not files:
220 220 self.ui.warn(_('%s: empty changeset') % revlog.hex(node))
221 221 return None
222 222 finally:
223 223 files = patch.updatedir(self.ui, repo, files)
224 224 except Exception, inst:
225 225 if filter:
226 226 os.unlink(patchfile)
227 227 seriespath = os.path.join(self.path, 'series')
228 228 if os.path.exists(seriespath):
229 229 os.unlink(seriespath)
230 230 p1 = repo.dirstate.parents()[0]
231 231 p2 = node
232 232 self.log(user, date, message, p1, p2, merge=merge)
233 233 self.ui.write(str(inst) + '\n')
234 234 raise util.Abort(_('Fix up the merge and run hg transplant --continue'))
235 235 else:
236 236 files = None
237 237 if merge:
238 238 p1, p2 = repo.dirstate.parents()
239 239 repo.dirstate.setparents(p1, node)
240 240
241 241 n = repo.commit(files, message, user, date, extra=extra)
242 242 if not merge:
243 243 self.transplants.set(n, node)
244 244
245 245 return n
246 246
247 247 def resume(self, repo, source, opts=None):
248 248 '''recover last transaction and apply remaining changesets'''
249 249 if os.path.exists(os.path.join(self.path, 'journal')):
250 250 n, node = self.recover(repo)
251 251 self.ui.status(_('%s transplanted as %s\n') % (revlog.short(node),
252 252 revlog.short(n)))
253 253 seriespath = os.path.join(self.path, 'series')
254 254 if not os.path.exists(seriespath):
255 255 self.transplants.write()
256 256 return
257 257 nodes, merges = self.readseries()
258 258 revmap = {}
259 259 for n in nodes:
260 260 revmap[source.changelog.rev(n)] = n
261 261 os.unlink(seriespath)
262 262
263 263 self.apply(repo, source, revmap, merges, opts)
264 264
265 265 def recover(self, repo):
266 266 '''commit working directory using journal metadata'''
267 267 node, user, date, message, parents = self.readlog()
268 268 merge = len(parents) == 2
269 269
270 270 if not user or not date or not message or not parents[0]:
271 271 raise util.Abort(_('transplant log file is corrupt'))
272 272
273 273 extra = {'transplant_source': node}
274 274 wlock = repo.wlock()
275 275 try:
276 276 p1, p2 = repo.dirstate.parents()
277 277 if p1 != parents[0]:
278 278 raise util.Abort(
279 279 _('working dir not at transplant parent %s') %
280 280 revlog.hex(parents[0]))
281 281 if merge:
282 282 repo.dirstate.setparents(p1, parents[1])
283 283 n = repo.commit(None, message, user, date, extra=extra)
284 284 if not n:
285 285 raise util.Abort(_('commit failed'))
286 286 if not merge:
287 287 self.transplants.set(n, node)
288 288 self.unlog()
289 289
290 290 return n, node
291 291 finally:
292 292 del wlock
293 293
294 294 def readseries(self):
295 295 nodes = []
296 296 merges = []
297 297 cur = nodes
298 298 for line in self.opener('series').read().splitlines():
299 299 if line.startswith('# Merges'):
300 300 cur = merges
301 301 continue
302 302 cur.append(revlog.bin(line))
303 303
304 304 return (nodes, merges)
305 305
306 306 def saveseries(self, revmap, merges):
307 307 if not revmap:
308 308 return
309 309
310 310 if not os.path.isdir(self.path):
311 311 os.mkdir(self.path)
312 312 series = self.opener('series', 'w')
313 313 for rev in util.sort(revmap):
314 314 series.write(revlog.hex(revmap[rev]) + '\n')
315 315 if merges:
316 316 series.write('# Merges\n')
317 317 for m in merges:
318 318 series.write(revlog.hex(m) + '\n')
319 319 series.close()
320 320
321 321 def parselog(self, fp):
322 322 parents = []
323 323 message = []
324 324 node = revlog.nullid
325 325 inmsg = False
326 326 for line in fp.read().splitlines():
327 327 if inmsg:
328 328 message.append(line)
329 329 elif line.startswith('# User '):
330 330 user = line[7:]
331 331 elif line.startswith('# Date '):
332 332 date = line[7:]
333 333 elif line.startswith('# Node ID '):
334 334 node = revlog.bin(line[10:])
335 335 elif line.startswith('# Parent '):
336 336 parents.append(revlog.bin(line[9:]))
337 337 elif not line.startswith('#'):
338 338 inmsg = True
339 339 message.append(line)
340 340 return (node, user, date, '\n'.join(message), parents)
341 341
342 342 def log(self, user, date, message, p1, p2, merge=False):
343 343 '''journal changelog metadata for later recover'''
344 344
345 345 if not os.path.isdir(self.path):
346 346 os.mkdir(self.path)
347 347 fp = self.opener('journal', 'w')
348 348 fp.write('# User %s\n' % user)
349 349 fp.write('# Date %s\n' % date)
350 350 fp.write('# Node ID %s\n' % revlog.hex(p2))
351 351 fp.write('# Parent ' + revlog.hex(p1) + '\n')
352 352 if merge:
353 353 fp.write('# Parent ' + revlog.hex(p2) + '\n')
354 354 fp.write(message.rstrip() + '\n')
355 355 fp.close()
356 356
357 357 def readlog(self):
358 358 return self.parselog(self.opener('journal'))
359 359
360 360 def unlog(self):
361 361 '''remove changelog journal'''
362 362 absdst = os.path.join(self.path, 'journal')
363 363 if os.path.exists(absdst):
364 364 os.unlink(absdst)
365 365
366 366 def transplantfilter(self, repo, source, root):
367 367 def matchfn(node):
368 368 if self.applied(repo, node, root):
369 369 return False
370 370 if source.changelog.parents(node)[1] != revlog.nullid:
371 371 return False
372 372 extra = source.changelog.read(node)[5]
373 373 cnode = extra.get('transplant_source')
374 374 if cnode and self.applied(repo, cnode, root):
375 375 return False
376 376 return True
377 377
378 378 return matchfn
379 379
380 380 def hasnode(repo, node):
381 381 try:
382 382 return repo.changelog.rev(node) != None
383 except revlog.RevlogError:
383 except error.RevlogError:
384 384 return False
385 385
386 386 def browserevs(ui, repo, nodes, opts):
387 387 '''interactively transplant changesets'''
388 388 def browsehelp(ui):
389 389 ui.write('y: transplant this changeset\n'
390 390 'n: skip this changeset\n'
391 391 'm: merge at this changeset\n'
392 392 'p: show patch\n'
393 393 'c: commit selected changesets\n'
394 394 'q: cancel transplant\n'
395 395 '?: show this help\n')
396 396
397 397 displayer = cmdutil.show_changeset(ui, repo, opts)
398 398 transplants = []
399 399 merges = []
400 400 for node in nodes:
401 401 displayer.show(repo[node])
402 402 action = None
403 403 while not action:
404 404 action = ui.prompt(_('apply changeset? [ynmpcq?]:'))
405 405 if action == '?':
406 406 browsehelp(ui)
407 407 action = None
408 408 elif action == 'p':
409 409 parent = repo.changelog.parents(node)[0]
410 410 for chunk in patch.diff(repo, parent, node):
411 411 repo.ui.write(chunk)
412 412 action = None
413 413 elif action not in ('y', 'n', 'm', 'c', 'q'):
414 414 ui.write('no such option\n')
415 415 action = None
416 416 if action == 'y':
417 417 transplants.append(node)
418 418 elif action == 'm':
419 419 merges.append(node)
420 420 elif action == 'c':
421 421 break
422 422 elif action == 'q':
423 423 transplants = ()
424 424 merges = ()
425 425 break
426 426 return (transplants, merges)
427 427
428 428 def transplant(ui, repo, *revs, **opts):
429 429 '''transplant changesets from another branch
430 430
431 431 Selected changesets will be applied on top of the current working
432 432 directory with the log of the original changeset. If --log is
433 433 specified, log messages will have a comment appended of the form:
434 434
435 435 (transplanted from CHANGESETHASH)
436 436
437 437 You can rewrite the changelog message with the --filter option.
438 438 Its argument will be invoked with the current changelog message
439 439 as $1 and the patch as $2.
440 440
441 441 If --source is specified, selects changesets from the named
442 442 repository. If --branch is specified, selects changesets from the
443 443 branch holding the named revision, up to that revision. If --all
444 444 is specified, all changesets on the branch will be transplanted,
445 445 otherwise you will be prompted to select the changesets you want.
446 446
447 447 hg transplant --branch REVISION --all will rebase the selected branch
448 448 (up to the named revision) onto your current working directory.
449 449
450 450 You can optionally mark selected transplanted changesets as
451 451 merge changesets. You will not be prompted to transplant any
452 452 ancestors of a merged transplant, and you can merge descendants
453 453 of them normally instead of transplanting them.
454 454
455 455 If no merges or revisions are provided, hg transplant will start
456 456 an interactive changeset browser.
457 457
458 458 If a changeset application fails, you can fix the merge by hand and
459 459 then resume where you left off by calling hg transplant --continue.
460 460 '''
461 461 def getremotechanges(repo, url):
462 462 sourcerepo = ui.expandpath(url)
463 463 source = hg.repository(ui, sourcerepo)
464 464 common, incoming, rheads = repo.findcommonincoming(source, force=True)
465 465 if not incoming:
466 466 return (source, None, None)
467 467
468 468 bundle = None
469 469 if not source.local():
470 470 if source.capable('changegroupsubset'):
471 471 cg = source.changegroupsubset(incoming, rheads, 'incoming')
472 472 else:
473 473 cg = source.changegroup(incoming, 'incoming')
474 474 bundle = changegroup.writebundle(cg, None, 'HG10UN')
475 475 source = bundlerepo.bundlerepository(ui, repo.root, bundle)
476 476
477 477 return (source, incoming, bundle)
478 478
479 479 def incwalk(repo, incoming, branches, match=util.always):
480 480 if not branches:
481 481 branches=None
482 482 for node in repo.changelog.nodesbetween(incoming, branches)[0]:
483 483 if match(node):
484 484 yield node
485 485
486 486 def transplantwalk(repo, root, branches, match=util.always):
487 487 if not branches:
488 488 branches = repo.heads()
489 489 ancestors = []
490 490 for branch in branches:
491 491 ancestors.append(repo.changelog.ancestor(root, branch))
492 492 for node in repo.changelog.nodesbetween(ancestors, branches)[0]:
493 493 if match(node):
494 494 yield node
495 495
496 496 def checkopts(opts, revs):
497 497 if opts.get('continue'):
498 498 if filter(lambda opt: opts.get(opt), ('branch', 'all', 'merge')):
499 499 raise util.Abort(_('--continue is incompatible with branch, all or merge'))
500 500 return
501 501 if not (opts.get('source') or revs or
502 502 opts.get('merge') or opts.get('branch')):
503 503 raise util.Abort(_('no source URL, branch tag or revision list provided'))
504 504 if opts.get('all'):
505 505 if not opts.get('branch'):
506 506 raise util.Abort(_('--all requires a branch revision'))
507 507 if revs:
508 508 raise util.Abort(_('--all is incompatible with a revision list'))
509 509
510 510 checkopts(opts, revs)
511 511
512 512 if not opts.get('log'):
513 513 opts['log'] = ui.config('transplant', 'log')
514 514 if not opts.get('filter'):
515 515 opts['filter'] = ui.config('transplant', 'filter')
516 516
517 517 tp = transplanter(ui, repo)
518 518
519 519 p1, p2 = repo.dirstate.parents()
520 520 if p1 == revlog.nullid:
521 521 raise util.Abort(_('no revision checked out'))
522 522 if not opts.get('continue'):
523 523 if p2 != revlog.nullid:
524 524 raise util.Abort(_('outstanding uncommitted merges'))
525 525 m, a, r, d = repo.status()[:4]
526 526 if m or a or r or d:
527 527 raise util.Abort(_('outstanding local changes'))
528 528
529 529 bundle = None
530 530 source = opts.get('source')
531 531 if source:
532 532 (source, incoming, bundle) = getremotechanges(repo, source)
533 533 else:
534 534 source = repo
535 535
536 536 try:
537 537 if opts.get('continue'):
538 538 tp.resume(repo, source, opts)
539 539 return
540 540
541 541 tf=tp.transplantfilter(repo, source, p1)
542 542 if opts.get('prune'):
543 543 prune = [source.lookup(r)
544 544 for r in cmdutil.revrange(source, opts.get('prune'))]
545 545 matchfn = lambda x: tf(x) and x not in prune
546 546 else:
547 547 matchfn = tf
548 548 branches = map(source.lookup, opts.get('branch', ()))
549 549 merges = map(source.lookup, opts.get('merge', ()))
550 550 revmap = {}
551 551 if revs:
552 552 for r in cmdutil.revrange(source, revs):
553 553 revmap[int(r)] = source.lookup(r)
554 554 elif opts.get('all') or not merges:
555 555 if source != repo:
556 556 alltransplants = incwalk(source, incoming, branches, match=matchfn)
557 557 else:
558 558 alltransplants = transplantwalk(source, p1, branches, match=matchfn)
559 559 if opts.get('all'):
560 560 revs = alltransplants
561 561 else:
562 562 revs, newmerges = browserevs(ui, source, alltransplants, opts)
563 563 merges.extend(newmerges)
564 564 for r in revs:
565 565 revmap[source.changelog.rev(r)] = r
566 566 for r in merges:
567 567 revmap[source.changelog.rev(r)] = r
568 568
569 569 tp.apply(repo, source, revmap, merges, opts)
570 570 finally:
571 571 if bundle:
572 572 source.close()
573 573 os.unlink(bundle)
574 574
575 575 cmdtable = {
576 576 "transplant":
577 577 (transplant,
578 578 [('s', 'source', '', _('pull patches from REPOSITORY')),
579 579 ('b', 'branch', [], _('pull patches from branch BRANCH')),
580 580 ('a', 'all', None, _('pull all changesets up to BRANCH')),
581 581 ('p', 'prune', [], _('skip over REV')),
582 582 ('m', 'merge', [], _('merge at REV')),
583 583 ('', 'log', None, _('append transplant info to log message')),
584 584 ('c', 'continue', None, _('continue last transplant session after repair')),
585 585 ('', 'filter', '', _('filter changesets through FILTER'))],
586 586 _('hg transplant [-s REPOSITORY] [-b BRANCH [-a]] [-p REV] [-m REV] [REV]...'))
587 587 }
@@ -1,298 +1,298 b''
1 1 """
2 2 bundlerepo.py - repository class for viewing uncompressed bundles
3 3
4 4 This provides a read-only repository interface to bundles as if
5 5 they were part of the actual repository.
6 6
7 7 Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import hex, nullid, short
14 14 from i18n import _
15 15 import changegroup, util, os, struct, bz2, zlib, tempfile, shutil, mdiff
16 import repo, localrepo, changelog, manifest, filelog, revlog, context
16 import repo, localrepo, changelog, manifest, filelog, revlog, context, error
17 17
18 18 class bundlerevlog(revlog.revlog):
19 19 def __init__(self, opener, indexfile, bundlefile,
20 20 linkmapper=None):
21 21 # How it works:
22 22 # to retrieve a revision, we need to know the offset of
23 23 # the revision in the bundlefile (an opened file).
24 24 #
25 25 # We store this offset in the index (start), to differentiate a
26 26 # rev in the bundle and from a rev in the revlog, we check
27 27 # len(index[r]). If the tuple is bigger than 7, it is a bundle
28 28 # (it is bigger since we store the node to which the delta is)
29 29 #
30 30 revlog.revlog.__init__(self, opener, indexfile)
31 31 self.bundlefile = bundlefile
32 32 self.basemap = {}
33 33 def chunkpositer():
34 34 for chunk in changegroup.chunkiter(bundlefile):
35 35 pos = bundlefile.tell()
36 36 yield chunk, pos - len(chunk)
37 37 n = len(self)
38 38 prev = None
39 39 for chunk, start in chunkpositer():
40 40 size = len(chunk)
41 41 if size < 80:
42 42 raise util.Abort(_("invalid changegroup"))
43 43 start += 80
44 44 size -= 80
45 45 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
46 46 if node in self.nodemap:
47 47 prev = node
48 48 continue
49 49 for p in (p1, p2):
50 50 if not p in self.nodemap:
51 raise revlog.LookupError(p1, self.indexfile,
52 _("unknown parent"))
51 raise error.LookupError(p1, self.indexfile,
52 _("unknown parent"))
53 53 if linkmapper is None:
54 54 link = n
55 55 else:
56 56 link = linkmapper(cs)
57 57
58 58 if not prev:
59 59 prev = p1
60 60 # start, size, full unc. size, base (unused), link, p1, p2, node
61 61 e = (revlog.offset_type(start, 0), size, -1, -1, link,
62 62 self.rev(p1), self.rev(p2), node)
63 63 self.basemap[n] = prev
64 64 self.index.insert(-1, e)
65 65 self.nodemap[node] = n
66 66 prev = node
67 67 n += 1
68 68
69 69 def bundle(self, rev):
70 70 """is rev from the bundle"""
71 71 if rev < 0:
72 72 return False
73 73 return rev in self.basemap
74 74 def bundlebase(self, rev): return self.basemap[rev]
75 75 def chunk(self, rev, df=None, cachelen=4096):
76 76 # Warning: in case of bundle, the diff is against bundlebase,
77 77 # not against rev - 1
78 78 # XXX: could use some caching
79 79 if not self.bundle(rev):
80 80 return revlog.revlog.chunk(self, rev, df)
81 81 self.bundlefile.seek(self.start(rev))
82 82 return self.bundlefile.read(self.length(rev))
83 83
84 84 def revdiff(self, rev1, rev2):
85 85 """return or calculate a delta between two revisions"""
86 86 if self.bundle(rev1) and self.bundle(rev2):
87 87 # hot path for bundle
88 88 revb = self.rev(self.bundlebase(rev2))
89 89 if revb == rev1:
90 90 return self.chunk(rev2)
91 91 elif not self.bundle(rev1) and not self.bundle(rev2):
92 92 return revlog.revlog.revdiff(self, rev1, rev2)
93 93
94 94 return mdiff.textdiff(self.revision(self.node(rev1)),
95 95 self.revision(self.node(rev2)))
96 96
97 97 def revision(self, node):
98 98 """return an uncompressed revision of a given"""
99 99 if node == nullid: return ""
100 100
101 101 text = None
102 102 chain = []
103 103 iter_node = node
104 104 rev = self.rev(iter_node)
105 105 # reconstruct the revision if it is from a changegroup
106 106 while self.bundle(rev):
107 107 if self._cache and self._cache[0] == iter_node:
108 108 text = self._cache[2]
109 109 break
110 110 chain.append(rev)
111 111 iter_node = self.bundlebase(rev)
112 112 rev = self.rev(iter_node)
113 113 if text is None:
114 114 text = revlog.revlog.revision(self, iter_node)
115 115
116 116 while chain:
117 117 delta = self.chunk(chain.pop())
118 118 text = mdiff.patches(text, [delta])
119 119
120 120 p1, p2 = self.parents(node)
121 121 if node != revlog.hash(text, p1, p2):
122 raise revlog.RevlogError(_("integrity check failed on %s:%d")
122 raise error.RevlogError(_("integrity check failed on %s:%d")
123 123 % (self.datafile, self.rev(node)))
124 124
125 125 self._cache = (node, self.rev(node), text)
126 126 return text
127 127
128 128 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
129 129 raise NotImplementedError
130 130 def addgroup(self, revs, linkmapper, transaction):
131 131 raise NotImplementedError
132 132 def strip(self, rev, minlink):
133 133 raise NotImplementedError
134 134 def checksize(self):
135 135 raise NotImplementedError
136 136
137 137 class bundlechangelog(bundlerevlog, changelog.changelog):
138 138 def __init__(self, opener, bundlefile):
139 139 changelog.changelog.__init__(self, opener)
140 140 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile)
141 141
142 142 class bundlemanifest(bundlerevlog, manifest.manifest):
143 143 def __init__(self, opener, bundlefile, linkmapper):
144 144 manifest.manifest.__init__(self, opener)
145 145 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
146 146 linkmapper)
147 147
148 148 class bundlefilelog(bundlerevlog, filelog.filelog):
149 149 def __init__(self, opener, path, bundlefile, linkmapper):
150 150 filelog.filelog.__init__(self, opener, path)
151 151 bundlerevlog.__init__(self, opener, self.indexfile, bundlefile,
152 152 linkmapper)
153 153
154 154 class bundlerepository(localrepo.localrepository):
155 155 def __init__(self, ui, path, bundlename):
156 156 self._tempparent = None
157 157 try:
158 158 localrepo.localrepository.__init__(self, ui, path)
159 159 except repo.RepoError:
160 160 self._tempparent = tempfile.mkdtemp()
161 161 tmprepo = localrepo.instance(ui,self._tempparent,1)
162 162 localrepo.localrepository.__init__(self, ui, self._tempparent)
163 163
164 164 if path:
165 165 self._url = 'bundle:' + path + '+' + bundlename
166 166 else:
167 167 self._url = 'bundle:' + bundlename
168 168
169 169 self.tempfile = None
170 170 self.bundlefile = open(bundlename, "rb")
171 171 header = self.bundlefile.read(6)
172 172 if not header.startswith("HG"):
173 173 raise util.Abort(_("%s: not a Mercurial bundle file") % bundlename)
174 174 elif not header.startswith("HG10"):
175 175 raise util.Abort(_("%s: unknown bundle version") % bundlename)
176 176 elif (header == "HG10BZ") or (header == "HG10GZ"):
177 177 fdtemp, temp = tempfile.mkstemp(prefix="hg-bundle-",
178 178 suffix=".hg10un", dir=self.path)
179 179 self.tempfile = temp
180 180 fptemp = os.fdopen(fdtemp, 'wb')
181 181 def generator(f):
182 182 if header == "HG10BZ":
183 183 zd = bz2.BZ2Decompressor()
184 184 zd.decompress("BZ")
185 185 elif header == "HG10GZ":
186 186 zd = zlib.decompressobj()
187 187 for chunk in f:
188 188 yield zd.decompress(chunk)
189 189 gen = generator(util.filechunkiter(self.bundlefile, 4096))
190 190
191 191 try:
192 192 fptemp.write("HG10UN")
193 193 for chunk in gen:
194 194 fptemp.write(chunk)
195 195 finally:
196 196 fptemp.close()
197 197 self.bundlefile.close()
198 198
199 199 self.bundlefile = open(self.tempfile, "rb")
200 200 # seek right after the header
201 201 self.bundlefile.seek(6)
202 202 elif header == "HG10UN":
203 203 # nothing to do
204 204 pass
205 205 else:
206 206 raise util.Abort(_("%s: unknown bundle compression type")
207 207 % bundlename)
208 208 # dict with the mapping 'filename' -> position in the bundle
209 209 self.bundlefilespos = {}
210 210
211 211 def __getattr__(self, name):
212 212 if name == 'changelog':
213 213 self.changelog = bundlechangelog(self.sopener, self.bundlefile)
214 214 self.manstart = self.bundlefile.tell()
215 215 return self.changelog
216 216 elif name == 'manifest':
217 217 self.bundlefile.seek(self.manstart)
218 218 self.manifest = bundlemanifest(self.sopener, self.bundlefile,
219 219 self.changelog.rev)
220 220 self.filestart = self.bundlefile.tell()
221 221 return self.manifest
222 222 elif name == 'manstart':
223 223 self.changelog
224 224 return self.manstart
225 225 elif name == 'filestart':
226 226 self.manifest
227 227 return self.filestart
228 228 else:
229 229 raise AttributeError(name)
230 230
231 231 def url(self):
232 232 return self._url
233 233
234 234 def file(self, f):
235 235 if not self.bundlefilespos:
236 236 self.bundlefile.seek(self.filestart)
237 237 while 1:
238 238 chunk = changegroup.getchunk(self.bundlefile)
239 239 if not chunk:
240 240 break
241 241 self.bundlefilespos[chunk] = self.bundlefile.tell()
242 242 for c in changegroup.chunkiter(self.bundlefile):
243 243 pass
244 244
245 245 if f[0] == '/':
246 246 f = f[1:]
247 247 if f in self.bundlefilespos:
248 248 self.bundlefile.seek(self.bundlefilespos[f])
249 249 return bundlefilelog(self.sopener, f, self.bundlefile,
250 250 self.changelog.rev)
251 251 else:
252 252 return filelog.filelog(self.sopener, f)
253 253
254 254 def close(self):
255 255 """Close assigned bundle file immediately."""
256 256 self.bundlefile.close()
257 257
258 258 def __del__(self):
259 259 bundlefile = getattr(self, 'bundlefile', None)
260 260 if bundlefile and not bundlefile.closed:
261 261 bundlefile.close()
262 262 tempfile = getattr(self, 'tempfile', None)
263 263 if tempfile is not None:
264 264 os.unlink(tempfile)
265 265 if self._tempparent:
266 266 shutil.rmtree(self._tempparent, True)
267 267
268 268 def cancopy(self):
269 269 return False
270 270
271 271 def getcwd(self):
272 272 return os.getcwd() # always outside the repo
273 273
274 274 def instance(ui, path, create):
275 275 if create:
276 276 raise util.Abort(_('cannot create new bundle repository'))
277 277 parentpath = ui.config("bundle", "mainreporoot", "")
278 278 if parentpath:
279 279 # Try to make the full path relative so we get a nice, short URL.
280 280 # In particular, we don't want temp dir names in test outputs.
281 281 cwd = os.getcwd()
282 282 if parentpath == cwd:
283 283 parentpath = ''
284 284 else:
285 285 cwd = os.path.join(cwd,'')
286 286 if parentpath.startswith(cwd):
287 287 parentpath = parentpath[len(cwd):]
288 288 path = util.drop_scheme('file', path)
289 289 if path.startswith('bundle:'):
290 290 path = util.drop_scheme('bundle', path)
291 291 s = path.split("+", 1)
292 292 if len(s) == 1:
293 293 repopath, bundlename = parentpath, s[0]
294 294 else:
295 295 repopath, bundlename = s
296 296 else:
297 297 repopath, bundlename = parentpath, path
298 298 return bundlerepository(ui, repopath, bundlename)
@@ -1,196 +1,197 b''
1 1 # changelog.py - changelog class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid
9 9 from revlog import revlog, RevlogError
10 10 from i18n import _
11 import util
11 import util, error
12 12
13 13 def _string_escape(text):
14 14 """
15 15 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
16 16 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
17 17 >>> s
18 18 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
19 19 >>> res = _string_escape(s)
20 20 >>> s == res.decode('string_escape')
21 21 True
22 22 """
23 23 # subset of the string_escape codec
24 24 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
25 25 return text.replace('\0', '\\0')
26 26
27 27 class appender:
28 28 '''the changelog index must be update last on disk, so we use this class
29 29 to delay writes to it'''
30 30 def __init__(self, fp, buf):
31 31 self.data = buf
32 32 self.fp = fp
33 33 self.offset = fp.tell()
34 34 self.size = util.fstat(fp).st_size
35 35
36 36 def end(self):
37 37 return self.size + len("".join(self.data))
38 38 def tell(self):
39 39 return self.offset
40 40 def flush(self):
41 41 pass
42 42 def close(self):
43 43 self.fp.close()
44 44
45 45 def seek(self, offset, whence=0):
46 46 '''virtual file offset spans real file and data'''
47 47 if whence == 0:
48 48 self.offset = offset
49 49 elif whence == 1:
50 50 self.offset += offset
51 51 elif whence == 2:
52 52 self.offset = self.end() + offset
53 53 if self.offset < self.size:
54 54 self.fp.seek(self.offset)
55 55
56 56 def read(self, count=-1):
57 57 '''only trick here is reads that span real file and data'''
58 58 ret = ""
59 59 if self.offset < self.size:
60 60 s = self.fp.read(count)
61 61 ret = s
62 62 self.offset += len(s)
63 63 if count > 0:
64 64 count -= len(s)
65 65 if count != 0:
66 66 doff = self.offset - self.size
67 67 self.data.insert(0, "".join(self.data))
68 68 del self.data[1:]
69 69 s = self.data[0][doff:doff+count]
70 70 self.offset += len(s)
71 71 ret += s
72 72 return ret
73 73
74 74 def write(self, s):
75 75 self.data.append(str(s))
76 76 self.offset += len(s)
77 77
78 78 class changelog(revlog):
79 79 def __init__(self, opener):
80 80 revlog.__init__(self, opener, "00changelog.i")
81 81
82 82 def delayupdate(self):
83 83 "delay visibility of index updates to other readers"
84 84 self._realopener = self.opener
85 85 self.opener = self._delayopener
86 86 self._delaycount = len(self)
87 87 self._delaybuf = []
88 88 self._delayname = None
89 89
90 90 def finalize(self, tr):
91 91 "finalize index updates"
92 92 self.opener = self._realopener
93 93 # move redirected index data back into place
94 94 if self._delayname:
95 95 util.rename(self._delayname + ".a", self._delayname)
96 96 elif self._delaybuf:
97 97 fp = self.opener(self.indexfile, 'a')
98 98 fp.write("".join(self._delaybuf))
99 99 fp.close()
100 100 del self._delaybuf
101 101 # split when we're done
102 102 self.checkinlinesize(tr)
103 103
104 104 def _delayopener(self, name, mode='r'):
105 105 fp = self._realopener(name, mode)
106 106 # only divert the index
107 107 if not name == self.indexfile:
108 108 return fp
109 109 # if we're doing an initial clone, divert to another file
110 110 if self._delaycount == 0:
111 111 self._delayname = fp.name
112 112 if not len(self):
113 113 # make sure to truncate the file
114 114 mode = mode.replace('a', 'w')
115 115 return self._realopener(name + ".a", mode)
116 116 # otherwise, divert to memory
117 117 return appender(fp, self._delaybuf)
118 118
119 119 def checkinlinesize(self, tr, fp=None):
120 120 if self.opener == self._delayopener:
121 121 return
122 122 return revlog.checkinlinesize(self, tr, fp)
123 123
124 124 def decode_extra(self, text):
125 125 extra = {}
126 126 for l in text.split('\0'):
127 127 if l:
128 128 k, v = l.decode('string_escape').split(':', 1)
129 129 extra[k] = v
130 130 return extra
131 131
132 132 def encode_extra(self, d):
133 133 # keys must be sorted to produce a deterministic changelog entry
134 134 items = [_string_escape('%s:%s' % (k, d[k])) for k in util.sort(d)]
135 135 return "\0".join(items)
136 136
137 137 def read(self, node):
138 138 """
139 139 format used:
140 140 nodeid\n : manifest node in ascii
141 141 user\n : user, no \n or \r allowed
142 142 time tz extra\n : date (time is int or float, timezone is int)
143 143 : extra is metadatas, encoded and separated by '\0'
144 144 : older versions ignore it
145 145 files\n\n : files modified by the cset, no \n or \r allowed
146 146 (.*) : comment (free text, ideally utf-8)
147 147
148 148 changelog v0 doesn't use extra
149 149 """
150 150 text = self.revision(node)
151 151 if not text:
152 152 return (nullid, "", (0, 0), [], "", {'branch': 'default'})
153 153 last = text.index("\n\n")
154 154 desc = util.tolocal(text[last + 2:])
155 155 l = text[:last].split('\n')
156 156 manifest = bin(l[0])
157 157 user = util.tolocal(l[1])
158 158
159 159 extra_data = l[2].split(' ', 2)
160 160 if len(extra_data) != 3:
161 161 time = float(extra_data.pop(0))
162 162 try:
163 163 # various tools did silly things with the time zone field.
164 164 timezone = int(extra_data[0])
165 165 except:
166 166 timezone = 0
167 167 extra = {}
168 168 else:
169 169 time, timezone, extra = extra_data
170 170 time, timezone = float(time), int(timezone)
171 171 extra = self.decode_extra(extra)
172 172 if not extra.get('branch'):
173 173 extra['branch'] = 'default'
174 174 files = l[3:]
175 175 return (manifest, user, (time, timezone), files, desc, extra)
176 176
177 177 def add(self, manifest, files, desc, transaction, p1=None, p2=None,
178 178 user=None, date=None, extra={}):
179 179
180 180 user = user.strip()
181 181 if "\n" in user:
182 raise RevlogError(_("username %s contains a newline") % repr(user))
182 raise error.RevlogError(_("username %s contains a newline")
183 % repr(user))
183 184 user, desc = util.fromlocal(user), util.fromlocal(desc)
184 185
185 186 if date:
186 187 parseddate = "%d %d" % util.parsedate(date)
187 188 else:
188 189 parseddate = "%d %d" % util.makedate()
189 190 if extra and extra.get("branch") in ("default", ""):
190 191 del extra["branch"]
191 192 if extra:
192 193 extra = self.encode_extra(extra)
193 194 parseddate = "%s %s" % (parseddate, extra)
194 195 l = [hex(manifest), user, parseddate] + util.sort(files) + ["", desc]
195 196 text = "\n".join(l)
196 197 return self.addrevision(text, transaction, len(self), p1, p2)
@@ -1,3429 +1,3429 b''
1 1 # commands.py - command processing for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from repo import RepoError, NoCapability
10 10 from i18n import _, gettext
11 11 import os, re, sys
12 import hg, util, revlog, bundlerepo, extensions, copies, context
12 import hg, util, revlog, bundlerepo, extensions, copies, context, error
13 13 import difflib, patch, time, help, mdiff, tempfile, url
14 14 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
15 15 import merge as merge_
16 16
17 17 # Commands start here, listed alphabetically
18 18
def add(ui, repo, *pats, **opts):
    """add the specified files on the next commit

    Schedule files to be version controlled and added to the repository.

    The files will be added to the repository at the next commit. To
    undo an add before that, see hg revert.

    If no names are given, add all files in the repository.
    """

    rejected = None
    exacts = {}
    names = []
    m = cmdutil.match(repo, pats, opts)
    # silence the matcher's complaints about missing files; explicitly
    # named files that fail to add are reported via 'rejected' below
    m.bad = lambda x,y: True
    for abs in repo.walk(m):
        if m.exact(abs):
            # explicitly listed file: only chatter about it when verbose
            if ui.verbose:
                ui.status(_('adding %s\n') % m.rel(abs))
            names.append(abs)
            exacts[abs] = 1
        elif abs not in repo.dirstate:
            # pattern match on an untracked file
            ui.status(_('adding %s\n') % m.rel(abs))
            names.append(abs)
    if not opts.get('dry_run'):
        rejected = repo.add(names)
        # only explicitly named files count toward the failure exit code
        rejected = [p for p in rejected if p in exacts]
    # returns 1 if any explicitly named file was rejected, else 0/None
    return rejected and 1 or 0
48 48
def addremove(ui, repo, *pats, **opts):
    """add all new files, delete all missing files

    Add all new files and remove all missing files from the repository.

    New files are ignored if they match any of the patterns in .hgignore. As
    with add, these changes take effect at the next commit.

    Use the -s option to detect renamed files. With a parameter > 0,
    this compares every removed file with every added file and records
    those similar enough as renames. This option takes a percentage
    between 0 (disabled) and 100 (files must be identical) as its
    parameter. Detecting renamed files this way can be expensive.
    """
    # --similarity arrives as a string; validate it before delegating
    rawsim = opts.get('similarity') or 0
    try:
        similarity = float(rawsim)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if not (0 <= similarity <= 100):
        raise util.Abort(_('similarity must be between 0 and 100'))
    # cmdutil expects the similarity as a 0.0-1.0 fraction
    return cmdutil.addremove(repo, pats, opts, similarity=similarity / 100.)
70 70
def annotate(ui, repo, *pats, **opts):
    """show changeset information per file line

    List changes in files, showing the revision id responsible for each line

    This command is useful to discover who did a change or when a change took
    place.

    Without the -a option, annotate will avoid processing files it
    detects as binary. With -a, annotate will generate an annotation
    anyway, probably with undesirable results.
    """
    # short dates when quiet; cachefunc avoids re-formatting per line
    datefunc = ui.quiet and util.shortdate or util.datestr
    getdate = util.cachefunc(lambda x: datefunc(x[0].date()))

    if not pats:
        raise util.Abort(_('at least one file name or pattern required'))

    # (option name, formatter) pairs; each formatter receives an
    # (fctx, linenumber) tuple produced by fctx.annotate() below
    opmap = [('user', lambda x: ui.shortuser(x[0].user())),
             ('number', lambda x: str(x[0].rev())),
             ('changeset', lambda x: short(x[0].node())),
             ('date', getdate),
             ('follow', lambda x: x[0].path()),
            ]

    if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
        and not opts.get('follow')):
        # no column selected: default to showing revision numbers
        opts['number'] = 1

    linenumber = opts.get('line_number') is not None
    if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
        raise util.Abort(_('at least one of -n/-c is required for -l'))

    # keep only the formatters for the requested columns, in opmap order
    funcmap = [func for op, func in opmap if opts.get(op)]
    if linenumber:
        # suffix ":<lineno>" onto whatever the last column prints
        lastfunc = funcmap[-1]
        funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])

    ctx = repo[opts.get('rev')]

    m = cmdutil.match(repo, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        if not opts.get('text') and util.binary(fctx.data()):
            ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
            continue

        lines = fctx.annotate(follow=opts.get('follow'),
                              linenumber=linenumber)
        pieces = []

        # render each column right-aligned to its widest entry
        for f in funcmap:
            l = [f(n) for n, dummy in lines]
            if l:
                ml = max(map(len, l))
                pieces.append(["%*s" % (ml, x) for x in l])

        if pieces:
            # zip(*pieces) transposes columns back into per-line tuples;
            # l[1] is the original line text (already newline-terminated)
            for p, l in zip(zip(*pieces), lines):
                ui.write("%s: %s" % (" ".join(p), l[1]))
131 131
def archive(ui, repo, dest, **opts):
    '''create unversioned archive of a repository revision

    By default, the revision used is the parent of the working
    directory; use "-r" to specify a different revision.

    To specify the type of archive to create, use "-t". Valid
    types are:

    "files" (default): a directory full of files
    "tar": tar archive, uncompressed
    "tbz2": tar archive, compressed using bzip2
    "tgz": tar archive, compressed using gzip
    "uzip": zip archive, uncompressed
    "zip": zip archive, compressed using deflate

    The exact name of the destination archive or directory is given
    using a format string; see "hg help export" for details.

    Each member added to an archive file has a directory prefix
    prepended. Use "-p" to specify a format string for the prefix.
    The default is the basename of the archive, with suffixes removed.
    '''

    ctx = repo[opts.get('rev')]
    if not ctx:
        raise util.Abort(_('no working directory: please specify a revision'))
    node = ctx.node()
    # expand %-escapes (%h etc.) in the destination name
    dest = cmdutil.make_filename(repo, dest, node)
    if os.path.realpath(dest) == repo.root:
        raise util.Abort(_('repository root cannot be destination'))
    matchfn = cmdutil.match(repo, [], opts)
    kind = opts.get('type') or 'files'
    prefix = opts.get('prefix')
    if dest == '-':
        # streaming to stdout: only archive formats make sense
        if kind == 'files':
            raise util.Abort(_('cannot archive plain files to stdout'))
        dest = sys.stdout
        if not prefix: prefix = os.path.basename(repo.root) + '-%h'
    prefix = cmdutil.make_filename(repo, prefix, node)
    archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
                     matchfn, prefix)
174 174
def backout(ui, repo, node=None, rev=None, **opts):
    '''reverse effect of earlier changeset

    Commit the backed out changes as a new changeset. The new
    changeset is a child of the backed out changeset.

    If you back out a changeset other than the tip, a new head is
    created. This head will be the new tip and you should merge this
    backout changeset with another head (current one by default).

    The --merge option remembers the parent of the working directory
    before starting the backout, then merges the new head with that
    changeset afterwards. This saves you from doing the merge by
    hand. The result of this merge is not committed, as for a normal
    merge.

    See \'hg help dates\' for a list of formats valid for -d/--date.
    '''
    # the revision may arrive positionally (node) or via -r (rev);
    # accept either but not both
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if not rev:
        raise util.Abort(_("please specify a revision to backout"))

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    cmdutil.bail_if_changed(repo)
    node = repo.lookup(rev)

    op1, op2 = repo.dirstate.parents()
    # the target must be an ancestor of the working directory parent
    a = repo.changelog.ancestor(op1, node)
    if a != node:
        raise util.Abort(_('cannot back out change on a different branch'))

    p1, p2 = repo.changelog.parents(node)
    if p1 == nullid:
        raise util.Abort(_('cannot back out a change with no parents'))
    if p2 != nullid:
        # backing out a merge: the user must pick which parent to revert to
        if not opts.get('parent'):
            raise util.Abort(_('cannot back out a merge changeset without '
                               '--parent'))
        p = repo.lookup(opts['parent'])
        if p not in (p1, p2):
            raise util.Abort(_('%s is not a parent of %s') %
                             (short(p), short(node)))
        parent = p
    else:
        if opts.get('parent'):
            raise util.Abort(_('cannot use --parent on non-merge changeset'))
        parent = p1

    # the backout should appear on the same branch
    branch = repo.dirstate.branch()
    # update to the target, then revert everything to its chosen parent,
    # then commit the result as the backout changeset
    hg.clean(repo, node, show_stats=False)
    repo.dirstate.setbranch(branch)
    revert_opts = opts.copy()
    revert_opts['date'] = None
    revert_opts['all'] = True
    revert_opts['rev'] = hex(parent)
    revert_opts['no_backup'] = None
    revert(ui, repo, **revert_opts)
    commit_opts = opts.copy()
    commit_opts['addremove'] = False
    if not commit_opts['message'] and not commit_opts['logfile']:
        commit_opts['message'] = _("Backed out changeset %s") % (short(node))
        commit_opts['force_editor'] = True
    commit(ui, repo, **commit_opts)
    def nice(node):
        return '%d:%s' % (repo.changelog.rev(node), short(node))
    ui.status(_('changeset %s backs out changeset %s\n') %
              (nice(repo.changelog.tip()), nice(node)))
    if op1 != node:
        # return the working directory to where it was before the backout
        hg.clean(repo, op1, show_stats=False)
        if opts.get('merge'):
            ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
            hg.merge(repo, hex(repo.changelog.tip()))
        else:
            ui.status(_('the backout changeset is a new head - '
                        'do not forget to merge\n'))
            ui.status(_('(use "backout --merge" '
                        'if you want to auto-merge)\n'))
261 261
def bisect(ui, repo, rev=None, extra=None, command=None,
           reset=None, good=None, bad=None, skip=None, noupdate=None):
    """subdivision search of changesets

    This command helps to find changesets which introduce problems.
    To use, mark the earliest changeset you know exhibits the problem
    as bad, then mark the latest changeset which is free from the
    problem as good. Bisect will update your working directory to a
    revision for testing (unless the --noupdate option is specified).
    Once you have performed tests, mark the working directory as bad
    or good and bisect will either update to another candidate changeset
    or announce that it has found the bad revision.

    As a shortcut, you can also use the revision argument to mark a
    revision as good or bad without checking it out first.

    If you supply a command it will be used for automatic bisection. Its exit
    status will be used as flag to mark revision as bad or good. In case exit
    status is 0 the revision is marked as good, 125 - skipped, 127 (command not
    found) - bisection will be aborted and any other status bigger than 0 will
    mark revision as bad.
    """
    # print the final answer: one revision, or several when skips made
    # the result ambiguous
    def print_result(nodes, good):
        displayer = cmdutil.show_changeset(ui, repo, {})
        transition = (good and "good" or "bad")
        if len(nodes) == 1:
            # narrowed it down to a single revision
            ui.write(_("The first %s revision is:\n") % transition)
            displayer.show(repo[nodes[0]])
        else:
            # multiple possible revisions
            ui.write(_("Due to skipped revisions, the first "
                       "%s revision could be any of:\n") % transition)
            for n in nodes:
                displayer.show(repo[n])

    # abort unless at least one good and one bad revision are known;
    # in interactive mode a mark-only invocation is allowed to proceed
    def check_state(state, interactive=True):
        if not state['good'] or not state['bad']:
            if (good or bad or skip or reset) and interactive:
                return
            if not state['good']:
                raise util.Abort(_('cannot bisect (no known good revisions)'))
            else:
                raise util.Abort(_('cannot bisect (no known bad revisions)'))
        return True

    # backward compatibility
    if rev in "good bad reset init".split():
        ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
        cmd, rev, extra = rev, extra, None
        if cmd == "good":
            good = True
        elif cmd == "bad":
            bad = True
        else:
            reset = True
    elif extra or good + bad + skip + reset + bool(command) > 1:
        raise util.Abort(_('incompatible arguments'))

    if reset:
        # discard any saved bisection state
        p = repo.join("bisect.state")
        if os.path.exists(p):
            os.unlink(p)
        return

    state = hbisect.load_state(repo)

    if command:
        # automatic mode: run the command, classify by exit status,
        # narrow, update, repeat until a single candidate remains
        commandpath = util.find_exe(command)
        changesets = 1
        try:
            while changesets:
                # update state
                status = os.spawnl(os.P_WAIT, commandpath)
                if status == 125:
                    transition = "skip"
                elif status == 0:
                    transition = "good"
                # status < 0 means process was killed
                elif status == 127:
                    raise util.Abort(_("failed to execute %s") % command)
                elif status < 0:
                    raise util.Abort(_("%s killed") % command)
                else:
                    transition = "bad"
                node = repo.lookup(rev or '.')
                state[transition].append(node)
                ui.note(_('Changeset %s: %s\n') % (short(node), transition))
                check_state(state, interactive=False)
                # bisect
                nodes, changesets, good = hbisect.bisect(repo.changelog, state)
                # update to next check
                cmdutil.bail_if_changed(repo)
                hg.clean(repo, nodes[0], show_stats=False)
        finally:
            # persist progress even if the loop aborts mid-way
            hbisect.save_state(repo, state)
        return print_result(nodes, not status)

    # update state
    node = repo.lookup(rev or '.')
    if good:
        state['good'].append(node)
    elif bad:
        state['bad'].append(node)
    elif skip:
        state['skip'].append(node)

    hbisect.save_state(repo, state)

    if not check_state(state):
        return

    # actually bisect
    nodes, changesets, good = hbisect.bisect(repo.changelog, state)
    if changesets == 0:
        print_result(nodes, good)
    else:
        assert len(nodes) == 1 # only a single node can be tested next
        node = nodes[0]
        # compute the approximate number of remaining tests
        tests, size = 0, 2
        while size <= changesets:
            tests, size = tests + 1, size * 2
        rev = repo.changelog.rev(node)
        ui.write(_("Testing changeset %s:%s "
                   "(%s changesets remaining, ~%s tests)\n")
                 % (rev, short(node), changesets, tests))
        if not noupdate:
            cmdutil.bail_if_changed(repo)
            return hg.clean(repo, node)
392 392
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch does not exist in
    the repository until the next commit).

    Unless --force is specified, branch will not let you set a
    branch name that shadows an existing branch.

    Use --clean to reset the working directory branch to that of the
    parent of the working directory, negating a previous branch change.

    Use the command 'hg update' to switch to an existing branch.
    """

    if opts.get('clean'):
        # revert to the branch of the first working directory parent
        label = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
        return

    if not label:
        # no argument: just report the current branch
        ui.write("%s\n" % util.tolocal(repo.dirstate.branch()))
        return

    # refuse to shadow an existing branch unless --force is given or
    # the working directory is already on that branch
    if not opts.get('force') and label in repo.branchtags():
        parentbranches = [p.branch() for p in repo.parents()]
        if label not in parentbranches:
            raise util.Abort(_('a branch of the same name already exists'
                               ' (use --force to override)'))
    repo.dirstate.setbranch(util.fromlocal(label))
    ui.status(_('marked working directory as branch %s\n') % label)
422 422
def branches(ui, repo, active=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If active is specified, only show active branches.

    A branch is considered active if it contains repository heads.

    Use the command 'hg update' to switch to an existing branch.
    """
    hexfunc = ui.debugflag and hex or short
    # branches that still own at least one repository head
    activebranches = [util.tolocal(repo[n].branch())
                      for n in repo.heads()]
    # sort by (is-active, revision, name) then reverse: active branches
    # first, newest first within each group
    branches = util.sort([(tag in activebranches, repo.changelog.rev(node), tag)
                          for tag, node in repo.branchtags().items()])
    branches.reverse()

    for isactive, node, tag in branches:
        if (not active) or isactive:
            if ui.quiet:
                ui.write("%s\n" % tag)
            else:
                # right-align the rev number so the hash column lines up
                rev = str(node).rjust(31 - util.locallen(tag))
                isinactive = ((not isactive) and " (inactive)") or ''
                data = tag, rev, hexfunc(repo.lookup(node)), isinactive
                ui.write("%s %s:%s%s\n" % data)
449 449
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    found in the other repository.

    If no destination repository is specified the destination is
    assumed to have all the nodes specified by one or more --base
    parameters. To create a bundle containing all changesets, use
    --all (or --base null). To change the compression method applied,
    use the -t option (by default, bundles are compressed using bz2).

    The bundle file can then be transferred using conventional means and
    applied to another repository with the unbundle or pull command.
    This is useful when direct push and pull are not available or when
    exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            raise util.Abort(_("--base is incompatible with specifiying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        # walk backwards from the requested heads, collecting (in o)
        # every node not reachable from a --base node
        o = []
        has = {nullid: None}
        for n in base:
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
        else:
            visit = repo.changelog.heads()
        seen = {}
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                # all parents known to the base: n starts the outgoing set
                o.insert(0, n)
            else:
                for p in parents:
                    if p not in seen:
                        seen[p] = 1
                        visit.append(p)
    else:
        # no --base: ask the remote which changesets it is missing
        cmdutil.setremoteconfig(ui, opts)
        dest, revs, checkout = hg.parseurl(
            ui.expandpath(dest or 'default-push', dest or 'default'), revs)
        other = hg.repository(ui, dest)
        o = repo.findoutgoing(other, force=opts.get('force'))

    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    changegroup.writebundle(cg, fname, bundletype)
522 522
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision.
    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repo root
    %p root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    decode = opts.get('decode')
    found = False
    for path in ctx.walk(matcher):
        out = cmdutil.make_file(repo, opts.get('output'), ctx.node(),
                                pathname=path)
        data = ctx[path].data()
        if decode:
            # apply decode filters (e.g. keyword/eol expansion)
            data = repo.wwritedata(path, data)
        out.write(data)
        found = True
    # exit status 0 when at least one file was printed, 1 otherwise
    return int(not found)
549 549
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the checked out files). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.

    In some cases, you can clone repositories and checked out files
    using full hardlinks with

    $ cp -al REPO REPOCLONE

    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.

    If you use the -r option to clone up to a specific revision, no
    subsequent revisions will be present in the cloned repository.
    This option implies --pull, even on local repositories.

    If the -U option is used, the new clone will contain only a repository
    (.hg) and no working copy (the working copy parent is the null revision).

    See pull for valid source format details.

    It is possible to specify an ssh:// URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Look at the help text for the pull command for important details
    about ssh:// URLs.
    """
    cmdutil.setremoteconfig(ui, opts)
    # translate the command-line options into hg.clone() keywords
    cloneopts = {'pull': opts.get('pull'),
                 'stream': opts.get('uncompressed'),
                 'rev': opts.get('rev'),
                 'update': not opts.get('noupdate')}
    hg.clone(ui, source, dest, **cloneopts)
600 600
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository.

    If a list of files is omitted, all changes reported by "hg status"
    will be committed.

    If you are committing the result of a merge, do not provide any
    file names or -I/-X filters.

    If no commit message is specified, the configured editor is started to
    enter a message.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # cmdutil.commit handles message/matching; this callback does the
    # actual repository commit
    def commitfunc(ui, repo, message, match, opts):
        return repo.commit(match.files(), message, opts.get('user'), opts.get('date'),
                           match, force_editor=opts.get('force_editor'))

    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 in parents:
        # one of the parents was the old tip
        pass
    elif (parents == (nullrev, nullrev) or
          len(cl.heads(cl.node(parents[0]))) > 1 and
          (parents[1] == nullrev or len(cl.heads(cl.node(parents[1]))) > 1)):
        # the new changeset did not extend an existing head: warn the user
        ui.status(_('created new head\n'))

    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev,hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev,short(node)))
639 639
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit. To undo a copy
    before that, see hg revert.
    """
    # wlock(False): non-blocking working-directory lock
    wlock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        # dropping the last reference releases the lock
        # (pre-context-manager idiom used throughout this file)
        del wlock
659 659
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    nargs = len(args)
    if nargs == 3:
        # explicit index file given: open it directly, no repo needed
        index, rev1, rev2 = args
        rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif nargs == 2:
        # no index file: fall back to the current repository's changelog
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    ancestor = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(ancestor), hex(ancestor)))
677 677
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # complete option flags: global options plus, when a command is
        # named, that command's own option table
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        flags = []
        for opttable in tables:
            for o in opttable:
                shortopt, longopt = o[0], o[1]
                if shortopt:
                    flags.append('-%s' % shortopt)
                flags.append('--%s' % longopt)
        ui.write("%s\n" % "\n".join(flags))
        return

    # complete command names matching the given prefix
    names = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        # verbose mode lists every alias, not just the canonical name
        names = [' '.join(c[0]) for c in names.values()]
    ui.write("%s\n" % "\n".join(util.sort(names)))
699 699
def debugfsinfo(ui, path = "."):
    """show information about the filesystem holding path

    Reports whether the filesystem supports the executable bit and
    symbolic links, and whether it is case-sensitive.  A temporary
    probe file named .debugfsinfo is created inside path (and removed
    afterwards) for the case-sensitivity check.
    """
    # Create the probe inside 'path', not the cwd: the original created
    # it in the cwd, so with path != "." the case-sensitivity check
    # examined the wrong directory.
    probe = os.path.join(path, '.debugfsinfo')
    # create an empty probe file and close it explicitly (the original
    # leaked the file object until garbage collection)
    fp = open(probe, 'w')
    fp.write('')
    fp.close()
    try:
        ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
        ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
        ui.write('case-sensitive: %s\n' % (util.checkcase(probe)
                                           and 'yes' or 'no'))
    finally:
        # always remove the probe, even if a check raises
        os.unlink(probe)
707 707
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    ctx = repo[rev]
    # hold the working-directory lock while rewriting the dirstate
    wlock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        # dropping the last reference releases the lock
        # (pre-context-manager idiom used throughout this file)
        del wlock
716 716
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    p1, p2 = repo.dirstate.parents()
    manifest1 = repo[p1].manifest()
    manifest2 = repo[p2].manifest()
    dirstate = repo.dirstate
    problems = 0
    # pass 1: every dirstate entry must be consistent with the manifests
    for f in dirstate:
        state = dirstate[f]
        inm1 = f in manifest1
        if state in "nr" and not inm1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            problems += 1
        if state in "a" and inm1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            problems += 1
        if state in "m" and not inm1 and f not in manifest2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            problems += 1
    # pass 2: every file in the first manifest must be tracked
    for f in manifest1:
        state = dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            problems += 1
    if problems:
        raise util.Abort(
            _(".hg/dirstate inconsistent with current parent's manifest"))
743 743
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no args, print names and values of all config items.

    With one arg of the form section.name, print just the value of
    that config item.

    With multiple args, print names and values of all config items
    with matching section names."""

    untrusted = bool(opts.get('untrusted'))
    if values:
        # at most one fully-qualified "section.name" argument is allowed
        if len([v for v in values if '.' in v]) > 1:
            raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        sectname = section + '.' + name
        if values:
            for v in values:
                if v == section:
                    # bare section name: print every item in that section
                    ui.write('%s=%s\n' % (sectname, value))
                elif v == sectname:
                    # exact section.name match: print the value alone
                    ui.write(value, '\n')
        else:
            ui.write('%s=%s\n' % (sectname, value))
769 769
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """

    # default the second parent to the null revision
    if not rev2:
        rev2 = hex(nullid)

    wlock = repo.wlock()
    try:
        repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
    finally:
        # dropping the last reference releases the lock
        # (pre-context-manager idiom used throughout this file)
        del wlock
785 785
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    timestr = ""
    showdate = not nodates
    # each dirstate entry is (state, mode, size, mtime)
    for file_, ent in util.sort(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
                timestr = 'unset'
                timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
        # 020000 is the symlink bit in the stored mode (Py2 octal literal)
        if ent[1] & 020000:
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
806 806
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # map the ".d" data file name to its ".i" index companion
    indexfile = file_[:-2] + ".i"
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), indexfile)
    try:
        ui.write(rlog.revision(rlog.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
814 814
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended enables the extra, more permissive date formats
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    when, tz = parsed
    ui.write("internal: %s %s\n" % (when, tz))
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        matcher = util.matchdate(range)
        ui.write("match: %s\n" % matcher(when))
826 826
def debugindex(ui, file_):
    """dump the contents of an index file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("   rev    offset  length   base linkrev" +
             " nodeid       p1           p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        # was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; narrow it while keeping the best-effort intent
        # of still printing a row for a damaged entry
        except Exception:
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
                short(node), short(pp[0]), short(pp[1])))
841 841
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("digraph G {\n")
    for rev in rlog:
        parents = rlog.parents(rlog.node(rev))
        # the first parent always gets an edge
        ui.write("\t%d -> %d\n" % (rlog.rev(parents[0]), rev))
        # the second parent is only meaningful for merges
        if parents[1] != nullid:
            ui.write("\t%d -> %d\n" % (rlog.rev(parents[1]), rev))
    ui.write("}\n")
853 853
def debuginstall(ui):
    '''test Mercurial installation'''

    def writetemp(contents):
        # write contents to a throwaway temp file; callers unlink it later
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    # running total of detected problems; also the return value
    problems = 0

    # encoding
    ui.status(_("Checking encoding (%s)...\n") % util._encoding)
    try:
        util.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # compiled modules
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        t = templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # patch: exercise the patching machinery on a tiny synthetic diff
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)

    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        # verify both the reported file list and the resulting content
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        a = file(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1

    if patchproblems:
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://www.selenic.com/mercurial/bts\n"))
        problems += patchproblems

    os.unlink(fa)
    os.unlink(fd)

    # editor
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # accept either a bare command or a command with arguments
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        problems += 1

    # check username, mirroring the lookup order used elsewhere:
    # HGUSER, then ui.username config, then EMAIL
    ui.status(_("Checking username...\n"))
    user = os.environ.get("HGUSER")
    if user is None:
        user = ui.config("ui", "username")
    if user is None:
        user = os.environ.get("EMAIL")
    if not user:
        ui.warn(" ")
        ui.username()
        ui.write(_(" (specify a username in your .hgrc file)\n"))

    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
965 965
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    for path in ctx.walk(matcher):
        fctx = ctx[path]
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = matcher.rel(path)
        if not renamed:
            ui.write(_("%s not renamed\n") % rel)
            continue
        # renamed is a (source path, source filenode) pair
        ui.write(_("%s renamed from %s:%s\n") % (rel, renamed[0],
                                                 hex(renamed[1])))
979 979
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    matcher = cmdutil.match(repo, pats, opts)
    matched = list(repo.walk(matcher))
    if not matched:
        return
    # size the two name columns to the longest absolute/relative names
    absw = max([len(f) for f in matched])
    relw = max([len(matcher.rel(f)) for f in matched])
    fmt = 'f  %%-%ds  %%-%ds  %%s' % (absw, relw)
    for f in matched:
        flag = matcher.exact(f) and 'exact' or ''
        ui.write("%s\n" % (fmt % (f, matcher.rel(f), flag)).rstrip())
992 992
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a option, diff will avoid generating diffs of files
    it detects as binary. With -a, diff will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. Read the diffs help topic for more information.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # --change: diff a single changeset against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)

    m = cmdutil.match(repo, pats, opts)
    diffopts = patch.diffopts(ui, opts)
    for chunk in patch.diff(repo, node1, node2, match=m, opts=diffopts):
        repo.ui.write(chunk)
1034 1034
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent(s) and commit comment.

    NOTE: export may generate unexpected diff output for merge changesets,
    as it will compare the merge changeset against its first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %% literal "%" character
    %H changeset hash (40 bytes of hexadecimal)
    %N number of patches being generated
    %R changeset revision number
    %b basename of the exporting repository
    %h short-form changeset hash (12 bytes of hexadecimal)
    %n zero-padded sequence number, starting at 1
    %r zero-padded changeset revision number

    Without the -a option, export will avoid generating diffs of files
    it detects as binary. With -a, export will generate a diff anyway,
    probably with undesirable results.

    Use the --git option to generate diffs in the git extended diff
    format. Read the diffs help topic for more information.

    With the --switch-parent option, the diff will be against the second
    parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    # pick the singular or plural progress note
    msg = len(revs) > 1 and _('exporting patches:\n') or _('exporting patch:\n')
    ui.note(msg)
    patch.export(repo, revs,
                 template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
1078 1078
1079 1079 def grep(ui, repo, pattern, *pats, **opts):
1080 1080 """search for a pattern in specified files and revisions
1081 1081
1082 1082 Search revisions of files for a regular expression.
1083 1083
1084 1084 This command behaves differently than Unix grep. It only accepts
1085 1085 Python/Perl regexps. It searches repository history, not the
1086 1086 working directory. It always prints the revision number in which
1087 1087 a match appears.
1088 1088
1089 1089 By default, grep only prints output for the first revision of a
1090 1090 file in which it finds a match. To get it to print every revision
1091 1091 that contains a change in match status ("-" for a match that
1092 1092 becomes a non-match, or "+" for a non-match that becomes a match),
1093 1093 use the --all flag.
1094 1094 """
1095 1095 reflags = 0
1096 1096 if opts.get('ignore_case'):
1097 1097 reflags |= re.I
1098 1098 try:
1099 1099 regexp = re.compile(pattern, reflags)
1100 1100 except Exception, inst:
1101 1101 ui.warn(_("grep: invalid match pattern: %s\n") % inst)
1102 1102 return None
1103 1103 sep, eol = ':', '\n'
1104 1104 if opts.get('print0'):
1105 1105 sep = eol = '\0'
1106 1106
1107 1107 fcache = {}
1108 1108 def getfile(fn):
1109 1109 if fn not in fcache:
1110 1110 fcache[fn] = repo.file(fn)
1111 1111 return fcache[fn]
1112 1112
1113 1113 def matchlines(body):
1114 1114 begin = 0
1115 1115 linenum = 0
1116 1116 while True:
1117 1117 match = regexp.search(body, begin)
1118 1118 if not match:
1119 1119 break
1120 1120 mstart, mend = match.span()
1121 1121 linenum += body.count('\n', begin, mstart) + 1
1122 1122 lstart = body.rfind('\n', begin, mstart) + 1 or begin
1123 1123 begin = body.find('\n', mend) + 1 or len(body)
1124 1124 lend = begin - 1
1125 1125 yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]
1126 1126
1127 1127 class linestate(object):
1128 1128 def __init__(self, line, linenum, colstart, colend):
1129 1129 self.line = line
1130 1130 self.linenum = linenum
1131 1131 self.colstart = colstart
1132 1132 self.colend = colend
1133 1133
1134 1134 def __hash__(self):
1135 1135 return hash((self.linenum, self.line))
1136 1136
1137 1137 def __eq__(self, other):
1138 1138 return self.line == other.line
1139 1139
1140 1140 matches = {}
1141 1141 copies = {}
1142 1142 def grepbody(fn, rev, body):
1143 1143 matches[rev].setdefault(fn, [])
1144 1144 m = matches[rev][fn]
1145 1145 for lnum, cstart, cend, line in matchlines(body):
1146 1146 s = linestate(line, lnum, cstart, cend)
1147 1147 m.append(s)
1148 1148
1149 1149 def difflinestates(a, b):
1150 1150 sm = difflib.SequenceMatcher(None, a, b)
1151 1151 for tag, alo, ahi, blo, bhi in sm.get_opcodes():
1152 1152 if tag == 'insert':
1153 1153 for i in xrange(blo, bhi):
1154 1154 yield ('+', b[i])
1155 1155 elif tag == 'delete':
1156 1156 for i in xrange(alo, ahi):
1157 1157 yield ('-', a[i])
1158 1158 elif tag == 'replace':
1159 1159 for i in xrange(alo, ahi):
1160 1160 yield ('-', a[i])
1161 1161 for i in xrange(blo, bhi):
1162 1162 yield ('+', b[i])
1163 1163
1164 1164 prev = {}
1165 1165 def display(fn, rev, states, prevstates):
1166 1166 datefunc = ui.quiet and util.shortdate or util.datestr
1167 1167 found = False
1168 1168 filerevmatches = {}
1169 1169 r = prev.get(fn, -1)
1170 1170 if opts.get('all'):
1171 1171 iter = difflinestates(states, prevstates)
1172 1172 else:
1173 1173 iter = [('', l) for l in prevstates]
1174 1174 for change, l in iter:
1175 1175 cols = [fn, str(r)]
1176 1176 if opts.get('line_number'):
1177 1177 cols.append(str(l.linenum))
1178 1178 if opts.get('all'):
1179 1179 cols.append(change)
1180 1180 if opts.get('user'):
1181 1181 cols.append(ui.shortuser(get(r)[1]))
1182 1182 if opts.get('date'):
1183 1183 cols.append(datefunc(get(r)[2]))
1184 1184 if opts.get('files_with_matches'):
1185 1185 c = (fn, r)
1186 1186 if c in filerevmatches:
1187 1187 continue
1188 1188 filerevmatches[c] = 1
1189 1189 else:
1190 1190 cols.append(l.line)
1191 1191 ui.write(sep.join(cols), eol)
1192 1192 found = True
1193 1193 return found
1194 1194
1195 1195 fstate = {}
1196 1196 skip = {}
1197 1197 get = util.cachefunc(lambda r: repo[r].changeset())
1198 1198 changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
1199 1199 found = False
1200 1200 follow = opts.get('follow')
1201 1201 for st, rev, fns in changeiter:
1202 1202 if st == 'window':
1203 1203 matches.clear()
1204 1204 elif st == 'add':
1205 1205 ctx = repo[rev]
1206 1206 matches[rev] = {}
1207 1207 for fn in fns:
1208 1208 if fn in skip:
1209 1209 continue
1210 1210 try:
1211 1211 grepbody(fn, rev, getfile(fn).read(ctx.filenode(fn)))
1212 1212 fstate.setdefault(fn, [])
1213 1213 if follow:
1214 1214 copied = getfile(fn).renamed(ctx.filenode(fn))
1215 1215 if copied:
1216 1216 copies.setdefault(rev, {})[fn] = copied[0]
1217 except revlog.LookupError:
1217 except error.LookupError:
1218 1218 pass
1219 1219 elif st == 'iter':
1220 1220 for fn, m in util.sort(matches[rev].items()):
1221 1221 copy = copies.get(rev, {}).get(fn)
1222 1222 if fn in skip:
1223 1223 if copy:
1224 1224 skip[copy] = True
1225 1225 continue
1226 1226 if fn in prev or fstate[fn]:
1227 1227 r = display(fn, rev, m, fstate[fn])
1228 1228 found = found or r
1229 1229 if r and not opts.get('all'):
1230 1230 skip[fn] = True
1231 1231 if copy:
1232 1232 skip[copy] = True
1233 1233 fstate[fn] = m
1234 1234 if copy:
1235 1235 fstate[copy] = m
1236 1236 prev[fn] = rev
1237 1237
1238 1238 for fn, state in util.sort(fstate.items()):
1239 1239 if fn in skip:
1240 1240 continue
1241 1241 if fn not in copies.get(prev[fn], {}):
1242 1242 found = display(fn, rev, {}, state) or found
1243 1243 return (not found and 1) or 0
1244 1244
def heads(ui, repo, *branchrevs, **opts):
    """show current repository heads or show branch heads

    With no arguments, show all repository head changesets.

    If branch or revisions names are given this will show the heads of
    the specified branches or the branches those revisions are tagged
    with.

    Repository "heads" are changesets that don't have child
    changesets. They are where development generally takes place and
    are the usual targets for update and merge operations.

    Branch heads are changesets that have a given branch tag, but have
    no child changesets with that tag. They are usually where
    development on the given branch takes place.
    """
    # an optional starting revision limits which heads are considered
    if opts.get('rev'):
        start = repo.lookup(opts['rev'])
    else:
        start = None
    if not branchrevs:
        # Assume we're looking repo-wide heads if no revs were specified.
        heads = repo.heads(start)
    else:
        heads = []
        visitedset = util.set()
        for branchrev in branchrevs:
            branch = repo[branchrev].branch()
            # only examine each named branch once, even if several
            # arguments resolve to it
            if branch in visitedset:
                continue
            visitedset.add(branch)
            bheads = repo.branchheads(branch, start)
            if not bheads:
                # distinguish "argument was a revision on the branch"
                # from "argument was the branch name itself"
                if branch != branchrev:
                    ui.warn(_("no changes on branch %s containing %s are "
                              "reachable from %s\n")
                            % (branch, branchrev, opts.get('rev')))
                else:
                    ui.warn(_("no changes on branch %s are reachable from %s\n")
                            % (branch, opts.get('rev')))
            heads.extend(bheads)
    if not heads:
        return 1
    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in heads:
        displayer.show(repo[n])
1292 1292
def help_(ui, name=None, with_version=False):
    """show help for a given topic or a help overview

    With no arguments, print a list of commands and short help.

    Given a topic, extension, or command name, print help for that topic."""
    # (title, options) pairs accumulated by the helpers below and
    # rendered at the very end
    option_lists = []

    def addglobalopts(aliases):
        # queue the global options (verbose) or a usage hint (terse)
        if ui.verbose:
            option_lists.append((_("global options:"), globalopts))
            if name == 'shortlist':
                option_lists.append((_('use "hg help" for the full list '
                                       'of commands'), ()))
        else:
            if name == 'shortlist':
                msg = _('use "hg help" for the full list of commands '
                        'or "hg -v" for details')
            elif aliases:
                msg = _('use "hg -v help%s" to show aliases and '
                        'global options') % (name and " " + name or "")
            else:
                msg = _('use "hg -v help %s" to show global options') % name
            option_lists.append((msg, ()))

    def helpcmd(name):
        # show help for a single command; raises UnknownCommand via
        # cmdutil.findcmd if name does not resolve
        if with_version:
            version_(ui)
            ui.write('\n')

        try:
            aliases, i = cmdutil.findcmd(name, table, False)
        except cmdutil.AmbiguousCommand, inst:
            # ambiguous prefix: fall back to listing the candidates
            select = lambda c: c.lstrip('^').startswith(inst.args[0])
            helplist(_('list of commands:\n\n'), select)
            return

        # synopsis
        if len(i) > 2:
            if i[2].startswith('hg'):
                ui.write("%s\n" % i[2])
            else:
                ui.write('hg %s %s\n' % (aliases[0], i[2]))
        else:
            ui.write('hg %s\n' % aliases[0])

        # aliases
        if not ui.quiet and len(aliases) > 1:
            ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))

        # description
        doc = gettext(i[0].__doc__)
        if not doc:
            doc = _("(no help text available)")
        if ui.quiet:
            doc = doc.splitlines(0)[0]
        ui.write("\n%s\n" % doc.rstrip())

        if not ui.quiet:
            # options
            if i[1]:
                option_lists.append((_("options:\n"), i[1]))

            addglobalopts(False)

    def helplist(header, select=None):
        # list commands with one-line summaries; select, when given,
        # filters by command name
        h = {}
        cmds = {}
        for c, e in table.iteritems():
            f = c.split("|", 1)[0]
            if select and not select(f):
                continue
            if (not select and name != 'shortlist' and
                e[0].__module__ != __name__):
                continue
            if name == "shortlist" and not f.startswith("^"):
                continue
            f = f.lstrip("^")
            if not ui.debugflag and f.startswith("debug"):
                continue
            doc = gettext(e[0].__doc__)
            if not doc:
                doc = _("(no help text available)")
            h[f] = doc.splitlines(0)[0].rstrip()
            cmds[f] = c.lstrip("^")

        if not h:
            ui.status(_('no commands defined\n'))
            return

        ui.status(header)
        fns = util.sort(h)
        m = max(map(len, fns))
        for f in fns:
            if ui.verbose:
                commands = cmds[f].replace("|",", ")
                ui.write(" %s:\n %s\n"%(commands, h[f]))
            else:
                ui.write(' %-*s %s\n' % (m, f, h[f]))

        exts = list(extensions.extensions())
        if exts and name != 'shortlist':
            ui.write(_('\nenabled extensions:\n\n'))
            # two passes: first measure names to align the summaries
            maxlength = 0
            exthelps = []
            for ename, ext in exts:
                doc = (ext.__doc__ or _('(no help text available)'))
                ename = ename.split('.')[-1]
                maxlength = max(len(ename), maxlength)
                exthelps.append((ename, doc.splitlines(0)[0].strip()))
            for ename, text in exthelps:
                ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))

        if not ui.quiet:
            addglobalopts(True)

    def helptopic(name):
        # show help for a topic from help.helptable
        for names, header, doc in help.helptable:
            if name in names:
                break
        else:
            raise cmdutil.UnknownCommand(name)

        # description
        if not doc:
            doc = _("(no help text available)")
        if callable(doc):
            doc = doc()

        ui.write("%s\n" % header)
        ui.write("%s\n" % doc.rstrip())

    def helpext(name):
        # show help for an extension, plus the commands it defines
        try:
            mod = extensions.find(name)
        except KeyError:
            raise cmdutil.UnknownCommand(name)

        doc = gettext(mod.__doc__) or _('no help text available')
        doc = doc.splitlines(0)
        ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
        for d in doc[1:]:
            ui.write(d, '\n')

        ui.status('\n')

        try:
            ct = mod.cmdtable
        except AttributeError:
            ct = {}

        modcmds = dict.fromkeys([c.split('|', 1)[0] for c in ct])
        helplist(_('list of commands:\n\n'), modcmds.has_key)

    if name and name != 'shortlist':
        i = None
        # try topic, then command, then extension; remember the last
        # UnknownCommand so it can be re-raised if all three miss
        for f in (helptopic, helpcmd, helpext):
            try:
                f(name)
                i = None
                break
            except cmdutil.UnknownCommand, inst:
                i = inst
        if i:
            raise i

    else:
        # program name
        if ui.verbose or with_version:
            version_(ui)
        else:
            ui.status(_("Mercurial Distributed SCM\n"))
        ui.status('\n')

        # list of commands
        if name == "shortlist":
            header = _('basic commands:\n\n')
        else:
            header = _('list of commands:\n\n')

        helplist(header)

    # list all option lists
    opt_output = []
    for title, options in option_lists:
        opt_output.append(("\n%s" % title, None))
        for shortopt, longopt, default, desc in options:
            if "DEPRECATED" in desc and not ui.verbose: continue
            opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
                                          longopt and " --%s" % longopt),
                               "%s%s" % (desc,
                                         default
                                         and _(" (default: %s)") % default
                                         or "")))

    if not name:
        ui.write(_("\nadditional help topics:\n\n"))
        topics = []
        for names, header, doc in help.helptable:
            # sort a topic's aliases longest-first and show the longest
            names = [(-len(name), name) for name in names]
            names.sort()
            topics.append((names[0][1], header))
        topics_len = max([len(s[0]) for s in topics])
        for t, desc in topics:
            ui.write(" %-*s %s\n" % (topics_len, t, desc))

    if opt_output:
        # entries with no second element are section titles
        opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
        for first, second in opt_output:
            if second:
                ui.write(" %-*s %s\n" % (opts_len, first, second))
            else:
                ui.write("%s\n" % first)
1506 1506
def identify(ui, repo, source=None,
             rev=None, num=None, id=None, branch=None, tags=None):
    """identify the working copy or specified revision

    With no revision, print a summary of the current state of the repo.

    With a path, do a lookup in another repository.

    This summary identifies the repository state using one or two parent
    hash identifiers, followed by a "+" if there are uncommitted changes
    in the working directory, a list of tags for this revision and a branch
    name for non-default branches.
    """

    if not repo and not source:
        raise util.Abort(_("There is no Mercurial repository here "
                           "(.hg not found)"))

    hexfunc = ui.debugflag and hex or short
    # "default" output (id + tags + branch marker) applies only when no
    # specific field was requested
    default = not (num or id or branch or tags)
    output = []

    if source:
        # remote lookup: only the changeset id can be queried
        source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
        srepo = hg.repository(ui, source)
        if not rev and revs:
            rev = revs[0]
        if not rev:
            rev = "tip"
        if num or branch or tags:
            raise util.Abort(
                "can't query remote revision number, branch, or tags")
        output = [hexfunc(srepo.lookup(rev))]
    elif not rev:
        # working directory: may have one or two parents, and a "+"
        # suffix when there are uncommitted changes
        ctx = repo[None]
        parents = ctx.parents()
        changed = False
        if default or id or num:
            changed = ctx.files() + ctx.deleted()
        if default or id:
            output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
                                (changed) and "+" or "")]
        if num:
            output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
                                    (changed) and "+" or ""))
    else:
        ctx = repo[rev]
        if default or id:
            output = [hexfunc(ctx.node())]
        if num:
            output.append(str(ctx.rev()))

    if not source and default and not ui.quiet:
        b = util.tolocal(ctx.branch())
        if b != 'default':
            output.append("(%s)" % b)

        # multiple tags for a single parent separated by '/'
        t = "/".join(ctx.tags())
        if t:
            output.append(t)

    if branch:
        output.append(util.tolocal(ctx.branch()))

    if tags:
        output.extend(ctx.tags())

    ui.write("%s\n" % ' '.join(output))
1576 1576
def import_(ui, repo, patch1, *patches, **opts):
    """import an ordered set of patches

    Import a list of patches and commit them individually.

    If there are outstanding changes in the working directory, import
    will abort unless given the -f flag.

    You can import a patch straight from a mail message. Even patches
    as attachments work (body part must be type text/plain or
    text/x-patch to be used). From and Subject headers of email
    message are used as default committer and commit message. All
    text/plain body parts before first diff are added to commit
    message.

    If the imported patch was generated by hg export, user and description
    from patch override values from message headers and body. Values
    given on command line with -m and -u override these.

    If --exact is specified, import will set the working directory
    to the parent of each patch before applying it, and will abort
    if the resulting changeset has a different ID than the one
    recorded in the patch. This may happen due to character set
    problems or other deficiencies in the text patch format.

    With --similarity, hg will attempt to discover renames and copies
    in the patch in the same way as 'addremove'.

    To read a patch from standard input, use patch name "-".
    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    patches = (patch1,) + patches

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # --similarity is a percentage; validate it up front
    try:
        sim = float(opts.get('similarity') or 0)
    except ValueError:
        raise util.Abort(_('similarity must be a number'))
    if sim < 0 or sim > 100:
        raise util.Abort(_('similarity must be between 0 and 100'))

    if opts.get('exact') or not opts.get('force'):
        cmdutil.bail_if_changed(repo)

    d = opts["base"]
    strip = opts["strip"]
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        for p in patches:
            pf = os.path.join(d, p)

            if pf == '-':
                ui.status(_("applying patch from stdin\n"))
                pf = sys.stdin
            else:
                ui.status(_("applying %s\n") % p)
                pf = url.open(ui, pf)
            # extract returns a temp file name plus the patch metadata
            data = patch.extract(ui, pf)
            tmpname, message, user, date, branch, nodeid, p1, p2 = data

            if tmpname is None:
                raise util.Abort(_('no diffs found'))

            try:
                cmdline_message = cmdutil.logmessage(opts)
                if cmdline_message:
                    # pickup the cmdline msg
                    message = cmdline_message
                elif message:
                    # pickup the patch msg
                    message = message.strip()
                else:
                    # launch the editor
                    message = None
                ui.debug(_('message:\n%s\n') % message)

                wp = repo.parents()
                if opts.get('exact'):
                    # reposition the working directory on the patch's
                    # recorded parents before applying
                    if not nodeid or not p1:
                        raise util.Abort(_('not a mercurial patch'))
                    p1 = repo.lookup(p1)
                    p2 = repo.lookup(p2 or hex(nullid))

                    if p1 != wp[0].node():
                        hg.clean(repo, p1)
                    repo.dirstate.setparents(p1, p2)
                elif p2:
                    # best effort: adopt the recorded second parent if
                    # the first one matches the working directory
                    try:
                        p1 = repo.lookup(p1)
                        p2 = repo.lookup(p2)
                        if p1 == wp[0].node():
                            repo.dirstate.setparents(p1, p2)
                    except RepoError:
                        pass
                if opts.get('exact') or opts.get('import_branch'):
                    repo.dirstate.setbranch(branch or 'default')

                files = {}
                try:
                    fuzz = patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
                                       files=files)
                finally:
                    # record adds/removes even when patching failed partway
                    files = patch.updatedir(ui, repo, files, similarity=sim/100.)
                if not opts.get('no_commit'):
                    n = repo.commit(files, message, opts.get('user') or user,
                                    opts.get('date') or date)
                    if opts.get('exact'):
                        if hex(n) != nodeid:
                            repo.rollback()
                            raise util.Abort(_('patch is damaged'
                                               ' or loses information'))
                    # Force a dirstate write so that the next transaction
                    # backups an up-do-date file.
                    repo.dirstate.write()
            finally:
                os.unlink(tmpname)
    finally:
        # dropping the references releases the locks
        del lock, wlock
1700 1700
def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would be pulled if a pull
    was requested.

    For remote repository, using --bundle avoids downloading the changesets
    twice if the incoming is followed by a pull.

    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, chlist, rheads = repo.findcommonincoming(other, heads=revs,
                                                     force=opts["force"])
    if not chlist:
        # Nothing incoming: drop any stale bundle file, best effort.
        try:
            os.unlink(opts["bundle"])
        except:
            pass
        ui.status(_("no changes found\n"))
        return 1

    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)

            if revs is None and other.capable('changegroupsubset'):
                revs = rheads

            if revs is None:
                cg = other.changegroup(chlist, "incoming")
            else:
                cg = other.changegroupsubset(chlist, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle only when the user asked for one
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        nodes = other.changelog.nodesbetween(chlist, revs)[0]
        if opts.get('newest_first'):
            nodes.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        shown = 0
        for n in nodes:
            if shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])
    finally:
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
1771 1771
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.

    It is possible to specify an ssh:// URL as the destination.
    Look at the help text for the pull command for important details
    about ssh:// URLs.
    """
    # Apply ssh/remotecmd overrides first; the destination may be remote.
    cmdutil.setremoteconfig(ui, opts)
    hg.repository(ui, dest, create=1)
1786 1786
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the entire repository by default. To search
    just the current directory and its subdirectories, use
    "--include .".

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the "-0" option to both this command and "xargs".
    This will avoid the problem of "xargs" treating single filenames
    that contain white space as multiple filenames.
    """
    # Separator: NUL for xargs -0 mode, newline otherwise.
    if opts.get('print0'):
        sep = '\0'
    else:
        sep = '\n'
    rev = opts.get('rev') or None

    exitcode = 1
    matcher = cmdutil.match(repo, pats, opts, default='relglob')
    matcher.bad = lambda path, msg: False
    for abs in repo[rev].walk(matcher):
        if not rev and abs not in repo.dirstate:
            # without an explicit revision, skip files not under control
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(abs), sep)
        else:
            ui.write(((pats and matcher.rel(abs)) or abs), sep)
        exitcode = 0

    return exitcode
1821 1821
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a file name to follow history across
    renames and copies. --follow without a file name will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.

    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    See 'hg help dates' for a list of formats valid for -d/--date.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.

    NOTE: log -p may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only. Also, the files: list will only reflect files
    that are different from BOTH parents.

    """

    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)

    limit = cmdutil.loglimit(opts)
    count = 0

    if opts.get('copies') and opts.get('rev'):
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    else:
        endrev = len(repo)
    rcache = {}
    ncache = {}
    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            ncache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                node = fl.node(i)
                lr = fl.linkrev(i)
                renamed = fl.renamed(node)
                rcache[fn][lr] = renamed
                if renamed:
                    ncache[fn][node] = renamed
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.

        try:
            return repo[rev][fn].renamed()
        # Revlog exceptions now live in the 'error' module (revlog no
        # longer defines LookupError); catch error.LookupError so a
        # missing file in the manifest is treated as "no rename info".
        except error.LookupError:
            pass
        return None

    df = False
    if opts["date"]:
        df = util.matchdate(opts["date"])

    only_branches = opts.get('only_branch')

    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    for st, rev, fns in changeiter:
        if st == 'add':
            # Filter each candidate revision against the user's options
            # before handing it to the displayer.
            parents = [p for p in repo.changelog.parentrevs(rev)
                       if p != nullrev]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            if opts.get('only_merges') and len(parents) != 2:
                continue

            if only_branches:
                revbranch = get(rev)[5]['branch']
                if revbranch not in only_branches:
                    continue

            if df:
                changes = get(rev)
                if not df(changes[2][0]):
                    continue

            if opts.get('keyword'):
                changes = get(rev)
                miss = 0
                # keyword matches user, description or any touched file
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            if opts['user']:
                changes = get(rev)
                miss = 0
                for k in opts['user']:
                    if k != changes[1]:
                        miss = 1
                        break
                if miss:
                    continue

            copies = []
            if opts.get('copies') and rev:
                for fn in get(rev)[3]:
                    rename = getrenamed(fn, rev)
                    if rename:
                        copies.append((fn, rename[0]))
            displayer.show(context.changectx(repo, rev), copies=copies)
        elif st == 'iter':
            # 'iter' marks a revision actually emitted; enforce --limit here
            if count == limit: break
            if displayer.flush(rev):
                count += 1
1953 1953
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    The manifest is the list of files being version controlled. If no revision
    is given then the first parent of the working directory is used.

    With -v flag, print file permissions, symlink and executable bits. With
    --debug flag, print file revision hashes.
    """

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not node:
        node = rev

    # verbose-mode prefixes keyed by manifest flag: symlink, exec, plain
    markers = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = repo[node]
    for path in ctx:
        if ui.debugflag:
            ui.write("%40s " % hex(ctx.manifest()[path]))
        if ui.verbose:
            ui.write(markers[ctx.flags(path)])
        ui.write("%s\n" % path)
1982 1982
def merge(ui, repo, node=None, force=None, rev=None):
    """merge working directory with another revision

    Merge the contents of the current working directory and the
    requested revision. Files that changed between either parent are
    marked as changed for the next commit and a commit must be
    performed before any further updates are allowed.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other head,
    the other head is merged with by default. Otherwise, an explicit
    revision to merge with must be provided.
    """

    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = rev

    if not node:
        # No explicit target: merge with the other head of the current
        # branch, but only when that choice is unambiguous.
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev") %
                             (branch, len(bheads)))

        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev") %
                                 branch)
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)

        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # pick whichever branch head is not the working dir parent
        if parent == bheads[0]:
            node = bheads[-1]
        else:
            node = bheads[0]
    return hg.merge(repo, node, force=force)
2026 2026
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repository or
    the default push location. These are the changesets that would be pushed
    if a push was requested.

    See pull for valid destination format details.
    """
    limit = cmdutil.loglimit(opts)
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    other = hg.repository(ui, dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    base = repo.findoutgoing(other, force=opts.get('force'))
    if not base:
        ui.status(_("no changes found\n"))
        return 1

    nodes = repo.changelog.nodesbetween(base, revs)[0]
    if opts.get('newest_first'):
        nodes.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    shown = 0
    for n in nodes:
        if shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
2062 2062
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working dir or revision

    Print the working directory's parent revisions. If a
    revision is given via --rev, the parent of that revision
    will be printed. If a file argument is given, revision in
    which the file was last changed (before the working directory
    revision or the argument to --rev if given) is printed.
    """
    rev = opts.get('rev')
    if rev:
        ctx = repo[rev]
    else:
        ctx = repo[None]

    if file_:
        m = cmdutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit file name'))
        file_ = m.files()[0]
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            # Revlog exceptions moved to the 'error' module; revlog no
            # longer defines LookupError. A LookupError here means the
            # file is absent from this parent's manifest, which is fine.
            except error.LookupError:
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        fl = repo.file(file_)
        # map each file node back to the changeset that introduced it
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        p = [cp.node() for cp in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        if n != nullid:
            displayer.show(repo[n])
2102 2102
def paths(ui, repo, search=None):
    """show definition of symbolic path names

    Show definition of symbolic path name NAME. If no name is given, show
    definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.
    """
    if search:
        # look for one specific alias
        for alias, target in ui.configitems("paths"):
            if alias == search:
                ui.write("%s\n" % url.hidepassword(target))
                return
        ui.warn(_("not found!\n"))
        return 1
    # no name given: list every configured path
    for alias, target in ui.configitems("paths"):
        ui.write("%s = %s\n" % (alias, url.hidepassword(target)))
2122 2122
def postincoming(ui, repo, modheads, optupdate, checkout):
    # Shared tail for pull/unbundle: optionally update the working dir
    # and print a hint about what to do next.
    if modheads == 0:
        return
    if optupdate:
        if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
            return hg.update(repo, checkout)
        # update requested but new heads appeared - refuse and explain
        ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
2135 2135
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to a local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Valid URLs are of the form:

      local/filesystem/path (or file://local/filesystem/path)
      http://[user[:pass]@]host[:port]/[path]
      https://[user[:pass]@]host[:port]/[path]
      ssh://[user[:pass]@]host[:port]/[path]

    Paths in the local filesystem can either point to Mercurial
    repositories or to bundle files (as created by 'hg bundle' or
    'hg incoming --bundle').

    An optional identifier after # indicates a particular branch, tag,
    or changeset to pull.

    Some notes about using SSH with Mercurial:
    - SSH requires an accessible shell account on the destination machine
      and a copy of hg in the remote path or specified with as remotecmd.
    - path is relative to the remote user's home directory by default.
      Use an extra slash at the start of a path to specify an absolute path:
        ssh://example.com//tmp/repository
    - Mercurial doesn't use its own compression via SSH; the right thing
      to do is to configure it in your ~/.ssh/config, e.g.:
        Host *.mylocalnetwork.example.com
          Compression no
        Host *
          Compression yes
      Alternatively specify "ssh -C" as your ssh command in your hgrc or
      with the --ssh command line option.
    """
    source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    if revs:
        try:
            revs = [other.lookup(rev) for rev in revs]
        except NoCapability:
            # Note: do not name this local 'error' - that would shadow
            # the mercurial.error module this file now relies on.
            msg = _("Other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(msg)

    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
2189 2189
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It helps to move
    changes from the current repository to a different one. If the
    destination is local this is identical to a pull in that directory
    from the current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    the client has forgotten to pull and merge before pushing.

    Valid URLs are of the form:

      local/filesystem/path (or file://local/filesystem/path)
      ssh://[user[:pass]@]host[:port]/[path]
      http://[user[:pass]@]host[:port]/[path]
      https://[user[:pass]@]host[:port]/[path]

    An optional identifier after # indicates a particular branch, tag,
    or changeset to push. If -r is used, the named changeset and all its
    ancestors will be pushed to the remote repository.

    Look at the help text for the pull command for important details
    about ssh:// URLs.

    Pushing to http:// and https:// URLs is only possible, if this
    feature is explicitly enabled on the remote Mercurial server.
    """
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    ret = repo.push(other, opts.get('force'), revs=revs)
    # shell convention: exit status 0 only when the push reported success
    return ret == 0
2231 2231
2232 2232 def rawcommit(ui, repo, *pats, **opts):
2233 2233 """raw commit interface (DEPRECATED)
2234 2234
2235 2235 (DEPRECATED)
2236 2236 Lowlevel commit, for use in helper scripts.
2237 2237
2238 2238 This command is not intended to be used by normal users, as it is
2239 2239 primarily useful for importing from other SCMs.
2240 2240
2241 2241 This command is now deprecated and will be removed in a future
2242 2242 release, please use debugsetparents and commit instead.
2243 2243 """
2244 2244
2245 2245 ui.warn(_("(the rawcommit command is deprecated)\n"))
2246 2246
2247 2247 message = cmdutil.logmessage(opts)
2248 2248
2249 2249 files = cmdutil.match(repo, pats, opts).files()
2250 2250 if opts.get('files'):
2251 2251 files += open(opts['files']).read().splitlines()
2252 2252
2253 2253 parents = [repo.lookup(p) for p in opts['parent']]
2254 2254
2255 2255 try:
2256 2256 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2257 2257 except ValueError, inst:
2258 2258 raise util.Abort(str(inst))
2259 2259
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an interrupted
    operation. It should only be necessary when Mercurial suggests it.
    """
    # After a successful rollback, verify the store to confirm sanity.
    if repo.recover():
        return hg.verify(repo)
    return 1
2271 2271
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This only removes files from the current branch, not from the entire
    project history. -A can be used to remove only files that have already
    been deleted, -f can be used to force deletion, and -Af can be used
    to remove files from the next revision without deleting them.

    The following table details the behavior of remove for different file
    states (columns) and option combinations (rows). The file states are
    Added, Clean, Modified and Missing (as reported by hg status). The
    actions are Warn, Remove (from branch) and Delete (from disk).

           A  C  M  !
    none   W  RD W  R
    -f     R  RD RD R
    -A     W  W  W  R
    -Af    R  R  R  R

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))

    m = cmdutil.match(repo, pats, opts)
    st = repo.status(match=m, clean=True)
    modified, added, deleted, clean = st[0], st[1], st[3], st[6]

    def warn(files, reason):
        # one warning line per file we refuse to touch
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    # Apply the option/state table from the docstring.
    if force:
        removelist, forgetlist = modified + deleted + clean, added
    elif after:
        removelist, forgetlist = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        removelist, forgetlist = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    for f in util.sort(removelist + forgetlist):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    repo.forget(forgetlist)
    repo.remove(removelist, unlink=not after)
2326 2326
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If
    dest is a directory, copies are put in that directory. If dest is
    a file, there can only be one source.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with --after, the
    operation is recorded, but no copying is performed.

    This command takes effect in the next commit. To undo a rename
    before that, see hg revert.
    """
    wlock = repo.wlock(False)
    try:
        # cmdutil.copy with rename=True records the copy and schedules
        # the source for removal in one step
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        del wlock
2346 2346
def resolve(ui, repo, *pats, **opts):
    """retry file merges from a merge or update

    This command will cleanly retry unresolved file merges using file
    revisions preserved from the last update or merge. To attempt to
    resolve all unresolved files, use the -a switch.

    This command will also allow listing resolved files and manually
    marking and unmarking files as resolved.

    The codes used to show the status of files are:
    U = unresolved
    R = resolved
    """

    all, mark, unmark, show = [opts.get(o)
                               for o in 'all mark unmark list'.split()]

    # Option sanity: listing excludes marking, and mark/unmark conflict.
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)

    for f in ms:
        if not m(f):
            continue
        if show:
            ui.write("%s %s\n" % (ms[f].upper(), f))
        elif mark:
            ms.mark(f, "r")
        elif unmark:
            ms.mark(f, "u")
        else:
            # re-run the file merge against the working dir and the
            # other merge parent
            wctx = repo[None]
            mctx = wctx.parents()[-1]
            ms.resolve(f, wctx, mctx)
2387 2387
2388 2388 def revert(ui, repo, *pats, **opts):
2389 2389 """restore individual files or dirs to an earlier state
2390 2390
2391 2391 (use update -r to check out earlier revisions, revert does not
2392 2392 change the working dir parents)
2393 2393
2394 2394 With no revision specified, revert the named files or directories
2395 2395 to the contents they had in the parent of the working directory.
2396 2396 This restores the contents of the affected files to an unmodified
2397 2397 state and unschedules adds, removes, copies, and renames. If the
2398 2398 working directory has two parents, you must explicitly specify the
2399 2399 revision to revert to.
2400 2400
2401 2401 Using the -r option, revert the given files or directories to their
2402 2402 contents as of a specific revision. This can be helpful to "roll
2403 2403 back" some or all of an earlier change.
2404 2404 See 'hg help dates' for a list of formats valid for -d/--date.
2405 2405
2406 2406 Revert modifies the working directory. It does not commit any
2407 2407 changes, or change the parent of the working directory. If you
2408 2408 revert to a revision other than the parent of the working
2409 2409 directory, the reverted files will thus appear modified
2410 2410 afterwards.
2411 2411
2412 2412 If a file has been deleted, it is restored. If the executable
2413 2413 mode of a file was changed, it is reset.
2414 2414
2415 2415 If names are given, all files matching the names are reverted.
2416 2416 If no arguments are given, no files are reverted.
2417 2417
2418 2418 Modified files are saved with a .orig suffix before reverting.
2419 2419 To disable these backups, use --no-backup.
2420 2420 """
2421 2421
2422 2422 if opts["date"]:
2423 2423 if opts["rev"]:
2424 2424 raise util.Abort(_("you can't specify a revision and a date"))
2425 2425 opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])
2426 2426
2427 2427 if not pats and not opts.get('all'):
2428 2428 raise util.Abort(_('no files or directories specified; '
2429 2429 'use --all to revert the whole repo'))
2430 2430
2431 2431 parent, p2 = repo.dirstate.parents()
2432 2432 if not opts.get('rev') and p2 != nullid:
2433 2433 raise util.Abort(_('uncommitted merge - please provide a '
2434 2434 'specific revision'))
2435 2435 ctx = repo[opts.get('rev')]
2436 2436 node = ctx.node()
2437 2437 mf = ctx.manifest()
2438 2438 if node == parent:
2439 2439 pmf = mf
2440 2440 else:
2441 2441 pmf = None
2442 2442
2443 2443 # need all matching names in dirstate and manifest of target rev,
2444 2444 # so have to walk both. do not print errors if files exist in one
2445 2445 # but not other.
2446 2446
2447 2447 names = {}
2448 2448
2449 2449 wlock = repo.wlock()
2450 2450 try:
2451 2451 # walk dirstate.
2452 2452 files = []
2453 2453
2454 2454 m = cmdutil.match(repo, pats, opts)
2455 2455 m.bad = lambda x,y: False
2456 2456 for abs in repo.walk(m):
2457 2457 names[abs] = m.rel(abs), m.exact(abs)
2458 2458
2459 2459 # walk target manifest.
2460 2460
2461 2461 def badfn(path, msg):
2462 2462 if path in names:
2463 2463 return False
2464 2464 path_ = path + '/'
2465 2465 for f in names:
2466 2466 if f.startswith(path_):
2467 2467 return False
2468 2468 repo.ui.warn("%s: %s\n" % (m.rel(path), msg))
2469 2469 return False
2470 2470
2471 2471 m = cmdutil.match(repo, pats, opts)
2472 2472 m.bad = badfn
2473 2473 for abs in repo[node].walk(m):
2474 2474 if abs not in names:
2475 2475 names[abs] = m.rel(abs), m.exact(abs)
2476 2476
2477 2477 m = cmdutil.matchfiles(repo, names)
2478 2478 changes = repo.status(match=m)[:4]
2479 2479 modified, added, removed, deleted = map(dict.fromkeys, changes)
2480 2480
2481 2481 # if f is a rename, also revert the source
2482 2482 cwd = repo.getcwd()
2483 2483 for f in added:
2484 2484 src = repo.dirstate.copied(f)
2485 2485 if src and src not in names and repo.dirstate[src] == 'r':
2486 2486 removed[src] = None
2487 2487 names[src] = (repo.pathto(src, cwd), True)
2488 2488
2489 2489 def removeforget(abs):
2490 2490 if repo.dirstate[abs] == 'a':
2491 2491 return _('forgetting %s\n')
2492 2492 return _('removing %s\n')
2493 2493
2494 2494 revert = ([], _('reverting %s\n'))
2495 2495 add = ([], _('adding %s\n'))
2496 2496 remove = ([], removeforget)
2497 2497 undelete = ([], _('undeleting %s\n'))
2498 2498
2499 2499 disptable = (
2500 2500 # dispatch table:
2501 2501 # file state
2502 2502 # action if in target manifest
2503 2503 # action if not in target manifest
2504 2504 # make backup if in target manifest
2505 2505 # make backup if not in target manifest
2506 2506 (modified, revert, remove, True, True),
2507 2507 (added, revert, remove, True, False),
2508 2508 (removed, undelete, None, False, False),
2509 2509 (deleted, revert, remove, False, False),
2510 2510 )
2511 2511
2512 2512 for abs, (rel, exact) in util.sort(names.items()):
2513 2513 mfentry = mf.get(abs)
2514 2514 target = repo.wjoin(abs)
2515 2515 def handle(xlist, dobackup):
2516 2516 xlist[0].append(abs)
2517 2517 if dobackup and not opts.get('no_backup') and util.lexists(target):
2518 2518 bakname = "%s.orig" % rel
2519 2519 ui.note(_('saving current version of %s as %s\n') %
2520 2520 (rel, bakname))
2521 2521 if not opts.get('dry_run'):
2522 2522 util.copyfile(target, bakname)
2523 2523 if ui.verbose or not exact:
2524 2524 msg = xlist[1]
2525 2525 if not isinstance(msg, basestring):
2526 2526 msg = msg(abs)
2527 2527 ui.status(msg % rel)
2528 2528 for table, hitlist, misslist, backuphit, backupmiss in disptable:
2529 2529 if abs not in table: continue
2530 2530 # file has changed in dirstate
2531 2531 if mfentry:
2532 2532 handle(hitlist, backuphit)
2533 2533 elif misslist is not None:
2534 2534 handle(misslist, backupmiss)
2535 2535 break
2536 2536 else:
2537 2537 if abs not in repo.dirstate:
2538 2538 if mfentry:
2539 2539 handle(add, True)
2540 2540 elif exact:
2541 2541 ui.warn(_('file not managed: %s\n') % rel)
2542 2542 continue
2543 2543 # file has not changed in dirstate
2544 2544 if node == parent:
2545 2545 if exact: ui.warn(_('no changes needed to %s\n') % rel)
2546 2546 continue
2547 2547 if pmf is None:
2548 2548 # only need parent manifest in this unlikely case,
2549 2549 # so do not read by default
2550 2550 pmf = repo[parent].manifest()
2551 2551 if abs in pmf:
2552 2552 if mfentry:
2553 2553 # if version of file is same in parent and target
2554 2554 # manifests, do nothing
2555 2555 if (pmf[abs] != mfentry or
2556 2556 pmf.flags(abs) != mf.flags(abs)):
2557 2557 handle(revert, False)
2558 2558 else:
2559 2559 handle(remove, False)
2560 2560
2561 2561 if not opts.get('dry_run'):
2562 2562 def checkout(f):
2563 2563 fc = ctx[f]
2564 2564 repo.wwrite(f, fc.data(), fc.flags())
2565 2565
2566 2566 audit_path = util.path_auditor(repo.root)
2567 2567 for f in remove[0]:
2568 2568 if repo.dirstate[f] == 'a':
2569 2569 repo.dirstate.forget(f)
2570 2570 continue
2571 2571 audit_path(f)
2572 2572 try:
2573 2573 util.unlink(repo.wjoin(f))
2574 2574 except OSError:
2575 2575 pass
2576 2576 repo.dirstate.remove(f)
2577 2577
2578 2578 normal = None
2579 2579 if node == parent:
2580 2580 # We're reverting to our parent. If possible, we'd like status
2581 2581 # to report the file as clean. We have to use normallookup for
2582 2582 # merges to avoid losing information about merged/dirty files.
2583 2583 if p2 != nullid:
2584 2584 normal = repo.dirstate.normallookup
2585 2585 else:
2586 2586 normal = repo.dirstate.normal
2587 2587 for f in revert[0]:
2588 2588 checkout(f)
2589 2589 if normal:
2590 2590 normal(f)
2591 2591
2592 2592 for f in add[0]:
2593 2593 checkout(f)
2594 2594 repo.dirstate.add(f)
2595 2595
2596 2596 normal = repo.dirstate.normallookup
2597 2597 if node == parent and p2 == nullid:
2598 2598 normal = repo.dirstate.normal
2599 2599 for f in undelete[0]:
2600 2600 checkout(f)
2601 2601 normal(f)
2602 2602
2603 2603 finally:
2604 2604 del wlock
2605 2605
def rollback(ui, repo):
    """roll back the last transaction

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

    commit
    import
    pull
    push (with this repository as destination)
    unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # All of the actual undo machinery lives in the repository layer.
    repo.rollback()
2633 2633
def root(ui, repo):
    """print the root (top) of the current working dir

    Print the root directory of the current repository.
    """
    # repo.root is an absolute path with no trailing separator.
    ui.write("%s\n" % repo.root)
2640 2640
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the "-A" and "-E" options to log to files.
    """

    # --stdio serves a single ssh client over stdin/stdout instead of HTTP.
    if opts["stdio"]:
        if repo is None:
            raise RepoError(_("There is no Mercurial repository here"
                              " (.hg not found)"))
        sshserver.sshserver(ui, repo).serve_forever()

    # Copy command-line web options into the (parent) ui config so the
    # hgweb machinery picks them up.
    parentui = ui.parentui or ui
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate")
    for opt in optlist.split():
        val = opts[opt]
        if val:
            parentui.setconfig("web", opt, str(val))
            if (repo is not None) and (repo.ui != parentui):
                repo.ui.setconfig("web", opt, str(val))

    if repo is None and not ui.config("web", "webdir_conf"):
        raise RepoError(_("There is no Mercurial repository here"
                          " (.hg not found)"))

    class service:
        def init(self):
            util.set_signal_handler()
            self.httpd = hgweb.server.create_server(parentui, repo)

            # Only announce the listening address in verbose mode.
            if not ui.verbose:
                return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''  # default HTTP port needs no suffix

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr:  # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    service = service()

    # cmdutil.service handles daemonization and pid files around init/run.
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
2704 2704
def status(ui, repo, *pats, **opts):
    """show changed files in the working directory

    Show status of files in the repository. If names are given, only
    files that match are shown. Files that are clean or ignored or
    source of a copy/move operation, are not listed unless -c (clean),
    -i (ignored), -C (copies) or -A is given. Unless options described
    with "show only ..." are given, the options -mardu are used.

    Option -q/--quiet hides untracked (unknown and ignored) files
    unless explicitly requested with -u/--unknown or -i/--ignored.

    NOTE: status may appear to disagree with diff if permissions have
    changed or a merge has occurred. The standard diff format does not
    report permission changes and diff only reports changes relative
    to one merge parent.

    If one revision is given, it is used as the base revision.
    If two revisions are given, the difference between them is shown.

    The codes used to show the status of files are:
    M = modified
    A = added
    R = removed
    C = clean
    ! = deleted, but still tracked
    ? = not tracked
    I = ignored
      = the previous added file was copied from here
    """

    node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
    cwd = (pats and repo.getcwd()) or ''
    end = opts.get('print0') and '\0' or '\n'
    copy = {}
    states = 'modified added removed deleted unknown ignored clean'.split()
    # Which state categories to display, in canonical order.
    show = [k for k in states if opts[k]]
    if opts.get('all'):
        show += ui.quiet and (states[:4] + ['clean']) or states
    if not show:
        # default: -mardu (quiet mode drops unknown files too)
        show = ui.quiet and states[:4] or states[:5]

    stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
                       'ignored' in show, 'clean' in show, 'unknown' in show)
    changestates = zip(states, 'MAR!?IC', stat)

    # Resolve copy sources only when they will actually be displayed.
    if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
        ctxn = repo[nullid]
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        added = stat[1]
        if node2 is None:
            added = stat[0] + stat[1]  # merged?

        for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
            if k in added:
                copy[k] = v
            elif v in added:
                copy[v] = k

    for state, char, files in changestates:
        if state not in show:
            continue
        format = "%s %%s%s" % (char, end)
        if opts.get('no_status'):
            format = "%%s%s" % end

        for f in files:
            ui.write(format % repo.pathto(f, cwd))
            if f in copy:
                # indented continuation line naming the copy source
                ui.write('  %s%s' % (repo.pathto(copy[f], cwd), end))
2775 2775
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).

    See 'hg help dates' for a list of formats valid for -d/--date.
    """

    rev_ = "."
    names = (name1,) + names
    # Reject duplicate and reserved names up front.
    if len(names) != len(dict.fromkeys(names)):
        raise util.Abort(_('tag names must be unique'))
    for name in names:
        if name in ('tip', '.', 'null'):
            raise util.Abort(_('the name \'%s\' is reserved') % name)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')

    if opts.get('remove'):
        # Removal: every name must exist and be of the matching scope.
        expectedtype = opts.get('local') and 'local' or 'global'
        for name in names:
            if not repo.tagtype(name):
                raise util.Abort(_('tag \'%s\' does not exist') % name)
            if repo.tagtype(name) != expectedtype:
                raise util.Abort(_('tag \'%s\' is not a %s tag') %
                                 (name, expectedtype))
        # Tagging the null revision is how a tag is removed.
        rev_ = nullid
        if not message:
            message = _('Removed tag %s') % ', '.join(names)
    elif not opts.get('force'):
        for name in names:
            if name in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % name)

    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()

    if not message:
        message = (_('Added tag %s for changeset %s') %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
2839 2839
def tags(ui, repo):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose switch
    is used, a third column "local" is printed for local tags.
    """

    l = repo.tagslist()
    l.reverse()
    hexfunc = ui.debugflag and hex or short
    tagtype = ""

    for t, n in l:
        if ui.quiet:
            ui.write("%s\n" % t)
            continue

        try:
            hn = hexfunc(n)
            # %5d keeps the rev column aligned with the "?" fallback below.
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except error.LookupError:
            # Tag points to a node missing from this repo: list it anyway,
            # with "?" in place of the local revision number.  (Previously
            # this fallback was computed but never printed, because the
            # write below lived in the try's else branch.)
            r = "    ?:%s" % hn
        spaces = " " * (30 - util.locallen(t))
        if ui.verbose:
            if repo.tagtype(t) == 'local':
                tagtype = " local"
            else:
                tagtype = ""
        ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
2870 2870
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the most
    recently added changeset in the repository, the most recently
    changed head.

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # The tip is always the highest-numbered revision, len(repo) - 1.
    displayer = cmdutil.show_changeset(ui, repo, opts)
    displayer.show(repo[len(repo) - 1])
2884 2884
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    fnames = (fname1,) + fnames

    lock = None
    try:
        lock = repo.lock()
        for fname in fnames:
            # url.open handles local paths as well as remote URLs.
            f = url.open(ui, fname)
            gen = changegroup.readbundle(f, fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        # Deleting the lock object releases the repository lock.
        del lock

    # modheads from the last bundle decides the post-pull message/update.
    return postincoming(ui, repo, modheads, opts.get('update'), None)
2904 2904
def update(ui, repo, node=None, rev=None, clean=False, date=None):
    """update working directory

    Update the repository's working directory to the specified revision,
    or the tip of the current branch if none is specified. Use null as
    the revision to remove the working copy (like 'hg clone -U').

    When the working dir contains no uncommitted changes, it will be
    replaced by the state of the requested revision from the repo. When
    the requested revision is on a different branch, the working dir
    will additionally be switched to that branch.

    When there are uncommitted changes, use option -C to discard them,
    forcibly replacing the state of the working dir with the requested
    revision.

    When there are uncommitted changes and option -C is not used, and
    the parent revision and requested revision are on the same branch,
    and one of them is an ancestor of the other, then the new working
    directory will contain the requested revision merged with the
    uncommitted changes. Otherwise, the update will fail with a
    suggestion to use 'merge' or 'update -C' instead.

    If you want to update just one file to an older revision, use revert.

    See 'hg help dates' for a list of formats valid for --date.
    """
    # `node` is the positional form, `rev` the -r form; accept only one.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        rev = cmdutil.finddate(ui, repo, date)

    if clean:
        return hg.clean(repo, rev)
    return hg.update(repo, rev)
2947 2947
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # Delegate entirely to the shared verification routine.
    return hg.verify(repo)
2959 2959
def version_(ui):
    """output version and copyright information"""
    # Version line goes to stdout; the copyright notice uses ui.status
    # so -q suppresses it.
    ui.write(_("Mercurial Distributed SCM (version %s)\n") % util.version())
    ui.status(_(
        "\nCopyright (C) 2005-2008 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
2970 2970
# Command options and aliases are listed here, alphabetically

# Options accepted by every command.  Each entry is
# (short flag, long flag, default value, help text).
globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [], _('set/override config option')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', util._encoding, _('set the charset encoding')),
    ('', 'encodingmode', util._encodingmode, _('set the charset encoding mode')),
    ('', 'lsprof', None, _('print improved command execution profile')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

# Shared option groups, concatenated into per-command option lists below.
dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record user as committer')),
]

templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _("don't include dates in diff headers"))
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show'))
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
3049 3049
3050 3050 table = {
3051 3051 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3052 3052 "addremove":
3053 3053 (addremove, similarityopts + walkopts + dryrunopts,
3054 3054 _('[OPTION]... [FILE]...')),
3055 3055 "^annotate|blame":
3056 3056 (annotate,
3057 3057 [('r', 'rev', '', _('annotate the specified revision')),
3058 3058 ('f', 'follow', None, _('follow file copies and renames')),
3059 3059 ('a', 'text', None, _('treat all files as text')),
3060 3060 ('u', 'user', None, _('list the author (long with -v)')),
3061 3061 ('d', 'date', None, _('list the date (short with -q)')),
3062 3062 ('n', 'number', None, _('list the revision number (default)')),
3063 3063 ('c', 'changeset', None, _('list the changeset')),
3064 3064 ('l', 'line-number', None,
3065 3065 _('show line number at the first appearance'))
3066 3066 ] + walkopts,
3067 3067 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3068 3068 "archive":
3069 3069 (archive,
3070 3070 [('', 'no-decode', None, _('do not pass files through decoders')),
3071 3071 ('p', 'prefix', '', _('directory prefix for files in archive')),
3072 3072 ('r', 'rev', '', _('revision to distribute')),
3073 3073 ('t', 'type', '', _('type of distribution to create')),
3074 3074 ] + walkopts,
3075 3075 _('[OPTION]... DEST')),
3076 3076 "backout":
3077 3077 (backout,
3078 3078 [('', 'merge', None,
3079 3079 _('merge with old dirstate parent after backout')),
3080 3080 ('', 'parent', '', _('parent to choose when backing out merge')),
3081 3081 ('r', 'rev', '', _('revision to backout')),
3082 3082 ] + walkopts + commitopts + commitopts2,
3083 3083 _('[OPTION]... [-r] REV')),
3084 3084 "bisect":
3085 3085 (bisect,
3086 3086 [('r', 'reset', False, _('reset bisect state')),
3087 3087 ('g', 'good', False, _('mark changeset good')),
3088 3088 ('b', 'bad', False, _('mark changeset bad')),
3089 3089 ('s', 'skip', False, _('skip testing changeset')),
3090 3090 ('c', 'command', '', _('use command to check changeset state')),
3091 3091 ('U', 'noupdate', False, _('do not update to target'))],
3092 3092 _("[-gbsr] [-c CMD] [REV]")),
3093 3093 "branch":
3094 3094 (branch,
3095 3095 [('f', 'force', None,
3096 3096 _('set branch name even if it shadows an existing branch')),
3097 3097 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3098 3098 _('[-fC] [NAME]')),
3099 3099 "branches":
3100 3100 (branches,
3101 3101 [('a', 'active', False,
3102 3102 _('show only branches that have unmerged heads'))],
3103 3103 _('[-a]')),
3104 3104 "bundle":
3105 3105 (bundle,
3106 3106 [('f', 'force', None,
3107 3107 _('run even when remote repository is unrelated')),
3108 3108 ('r', 'rev', [],
3109 3109 _('a changeset up to which you would like to bundle')),
3110 3110 ('', 'base', [],
3111 3111 _('a base changeset to specify instead of a destination')),
3112 3112 ('a', 'all', None, _('bundle all changesets in the repository')),
3113 3113 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3114 3114 ] + remoteopts,
3115 3115 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3116 3116 "cat":
3117 3117 (cat,
3118 3118 [('o', 'output', '', _('print output to file with formatted name')),
3119 3119 ('r', 'rev', '', _('print the given revision')),
3120 3120 ('', 'decode', None, _('apply any matching decode filter')),
3121 3121 ] + walkopts,
3122 3122 _('[OPTION]... FILE...')),
3123 3123 "^clone":
3124 3124 (clone,
3125 3125 [('U', 'noupdate', None,
3126 3126 _('the clone will only contain a repository (no working copy)')),
3127 3127 ('r', 'rev', [],
3128 3128 _('a changeset you would like to have after cloning')),
3129 3129 ('', 'pull', None, _('use pull protocol to copy metadata')),
3130 3130 ('', 'uncompressed', None,
3131 3131 _('use uncompressed transfer (fast over LAN)')),
3132 3132 ] + remoteopts,
3133 3133 _('[OPTION]... SOURCE [DEST]')),
3134 3134 "^commit|ci":
3135 3135 (commit,
3136 3136 [('A', 'addremove', None,
3137 3137 _('mark new/missing files as added/removed before committing')),
3138 3138 ] + walkopts + commitopts + commitopts2,
3139 3139 _('[OPTION]... [FILE]...')),
3140 3140 "copy|cp":
3141 3141 (copy,
3142 3142 [('A', 'after', None, _('record a copy that has already occurred')),
3143 3143 ('f', 'force', None,
3144 3144 _('forcibly copy over an existing managed file')),
3145 3145 ] + walkopts + dryrunopts,
3146 3146 _('[OPTION]... [SOURCE]... DEST')),
3147 3147 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3148 3148 "debugcheckstate": (debugcheckstate, []),
3149 3149 "debugcomplete":
3150 3150 (debugcomplete,
3151 3151 [('o', 'options', None, _('show the command options'))],
3152 3152 _('[-o] CMD')),
3153 3153 "debugdate":
3154 3154 (debugdate,
3155 3155 [('e', 'extended', None, _('try extended date formats'))],
3156 3156 _('[-e] DATE [RANGE]')),
3157 3157 "debugdata": (debugdata, [], _('FILE REV')),
3158 3158 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3159 3159 "debugindex": (debugindex, [], _('FILE')),
3160 3160 "debugindexdot": (debugindexdot, [], _('FILE')),
3161 3161 "debuginstall": (debuginstall, []),
3162 3162 "debugrawcommit|rawcommit":
3163 3163 (rawcommit,
3164 3164 [('p', 'parent', [], _('parent')),
3165 3165 ('F', 'files', '', _('file list'))
3166 3166 ] + commitopts + commitopts2,
3167 3167 _('[OPTION]... [FILE]...')),
3168 3168 "debugrebuildstate":
3169 3169 (debugrebuildstate,
3170 3170 [('r', 'rev', '', _('revision to rebuild to'))],
3171 3171 _('[-r REV] [REV]')),
3172 3172 "debugrename":
3173 3173 (debugrename,
3174 3174 [('r', 'rev', '', _('revision to debug'))],
3175 3175 _('[-r REV] FILE')),
3176 3176 "debugsetparents":
3177 3177 (debugsetparents, [], _('REV1 [REV2]')),
3178 3178 "debugstate":
3179 3179 (debugstate,
3180 3180 [('', 'nodates', None, _('do not display the saved mtime'))],
3181 3181 _('[OPTION]...')),
3182 3182 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3183 3183 "^diff":
3184 3184 (diff,
3185 3185 [('r', 'rev', [], _('revision')),
3186 3186 ('c', 'change', '', _('change made by revision'))
3187 3187 ] + diffopts + diffopts2 + walkopts,
3188 3188 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3189 3189 "^export":
3190 3190 (export,
3191 3191 [('o', 'output', '', _('print output to file with formatted name')),
3192 3192 ('', 'switch-parent', None, _('diff against the second parent'))
3193 3193 ] + diffopts,
3194 3194 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3195 3195 "grep":
3196 3196 (grep,
3197 3197 [('0', 'print0', None, _('end fields with NUL')),
3198 3198 ('', 'all', None, _('print all revisions that match')),
3199 3199 ('f', 'follow', None,
3200 3200 _('follow changeset history, or file history across copies and renames')),
3201 3201 ('i', 'ignore-case', None, _('ignore case when matching')),
3202 3202 ('l', 'files-with-matches', None,
3203 3203 _('print only filenames and revs that match')),
3204 3204 ('n', 'line-number', None, _('print matching line numbers')),
3205 3205 ('r', 'rev', [], _('search in given revision range')),
3206 3206 ('u', 'user', None, _('list the author (long with -v)')),
3207 3207 ('d', 'date', None, _('list the date (short with -q)')),
3208 3208 ] + walkopts,
3209 3209 _('[OPTION]... PATTERN [FILE]...')),
3210 3210 "heads":
3211 3211 (heads,
3212 3212 [('r', 'rev', '', _('show only heads which are descendants of rev')),
3213 3213 ] + templateopts,
3214 3214 _('[-r REV] [REV]...')),
3215 3215 "help": (help_, [], _('[TOPIC]')),
3216 3216 "identify|id":
3217 3217 (identify,
3218 3218 [('r', 'rev', '', _('identify the specified rev')),
3219 3219 ('n', 'num', None, _('show local revision number')),
3220 3220 ('i', 'id', None, _('show global revision id')),
3221 3221 ('b', 'branch', None, _('show branch')),
3222 3222 ('t', 'tags', None, _('show tags'))],
3223 3223 _('[-nibt] [-r REV] [SOURCE]')),
3224 3224 "import|patch":
3225 3225 (import_,
3226 3226 [('p', 'strip', 1,
3227 3227 _('directory strip option for patch. This has the same\n'
3228 3228 'meaning as the corresponding patch option')),
3229 3229 ('b', 'base', '', _('base path')),
3230 3230 ('f', 'force', None,
3231 3231 _('skip check for outstanding uncommitted changes')),
3232 3232 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3233 3233 ('', 'exact', None,
3234 3234 _('apply patch to the nodes from which it was generated')),
3235 3235 ('', 'import-branch', None,
3236 3236 _('Use any branch information in patch (implied by --exact)'))] +
3237 3237 commitopts + commitopts2 + similarityopts,
3238 3238 _('[OPTION]... PATCH...')),
3239 3239 "incoming|in":
3240 3240 (incoming,
3241 3241 [('f', 'force', None,
3242 3242 _('run even when remote repository is unrelated')),
3243 3243 ('n', 'newest-first', None, _('show newest record first')),
3244 3244 ('', 'bundle', '', _('file to store the bundles into')),
3245 3245 ('r', 'rev', [],
3246 3246 _('a specific revision up to which you would like to pull')),
3247 3247 ] + logopts + remoteopts,
3248 3248 _('[-p] [-n] [-M] [-f] [-r REV]...'
3249 3249 ' [--bundle FILENAME] [SOURCE]')),
3250 3250 "^init":
3251 3251 (init,
3252 3252 remoteopts,
3253 3253 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3254 3254 "locate":
3255 3255 (locate,
3256 3256 [('r', 'rev', '', _('search the repository as it stood at rev')),
3257 3257 ('0', 'print0', None,
3258 3258 _('end filenames with NUL, for use with xargs')),
3259 3259 ('f', 'fullpath', None,
3260 3260 _('print complete paths from the filesystem root')),
3261 3261 ] + walkopts,
3262 3262 _('[OPTION]... [PATTERN]...')),
3263 3263 "^log|history":
3264 3264 (log,
3265 3265 [('f', 'follow', None,
3266 3266 _('follow changeset history, or file history across copies and renames')),
3267 3267 ('', 'follow-first', None,
3268 3268 _('only follow the first parent of merge changesets')),
3269 3269 ('d', 'date', '', _('show revs matching date spec')),
3270 3270 ('C', 'copies', None, _('show copied files')),
3271 3271 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3272 3272 ('r', 'rev', [], _('show the specified revision or range')),
3273 3273 ('', 'removed', None, _('include revs where files were removed')),
3274 3274 ('m', 'only-merges', None, _('show only merges')),
3275 3275 ('u', 'user', [], _('revs committed by user')),
3276 3276 ('b', 'only-branch', [],
3277 3277 _('show only changesets within the given named branch')),
3278 3278 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3279 3279 ] + logopts + walkopts,
3280 3280 _('[OPTION]... [FILE]')),
3281 3281 "manifest":
3282 3282 (manifest,
3283 3283 [('r', 'rev', '', _('revision to display'))],
3284 3284 _('[-r REV]')),
3285 3285 "^merge":
3286 3286 (merge,
3287 3287 [('f', 'force', None, _('force a merge with outstanding changes')),
3288 3288 ('r', 'rev', '', _('revision to merge')),
3289 3289 ],
3290 3290 _('[-f] [[-r] REV]')),
3291 3291 "outgoing|out":
3292 3292 (outgoing,
3293 3293 [('f', 'force', None,
3294 3294 _('run even when remote repository is unrelated')),
3295 3295 ('r', 'rev', [],
3296 3296 _('a specific revision up to which you would like to push')),
3297 3297 ('n', 'newest-first', None, _('show newest record first')),
3298 3298 ] + logopts + remoteopts,
3299 3299 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3300 3300 "^parents":
3301 3301 (parents,
3302 3302 [('r', 'rev', '', _('show parents from the specified rev')),
3303 3303 ] + templateopts,
3304 3304 _('hg parents [-r REV] [FILE]')),
3305 3305 "paths": (paths, [], _('[NAME]')),
3306 3306 "^pull":
3307 3307 (pull,
3308 3308 [('u', 'update', None,
3309 3309 _('update to new tip if changesets were pulled')),
3310 3310 ('f', 'force', None,
3311 3311 _('run even when remote repository is unrelated')),
3312 3312 ('r', 'rev', [],
3313 3313 _('a specific revision up to which you would like to pull')),
3314 3314 ] + remoteopts,
3315 3315 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3316 3316 "^push":
3317 3317 (push,
3318 3318 [('f', 'force', None, _('force push')),
3319 3319 ('r', 'rev', [],
3320 3320 _('a specific revision up to which you would like to push')),
3321 3321 ] + remoteopts,
3322 3322 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3323 3323 "recover": (recover, []),
3324 3324 "^remove|rm":
3325 3325 (remove,
3326 3326 [('A', 'after', None, _('record delete for missing files')),
3327 3327 ('f', 'force', None,
3328 3328 _('remove (and delete) file even if added or modified')),
3329 3329 ] + walkopts,
3330 3330 _('[OPTION]... FILE...')),
3331 3331 "rename|mv":
3332 3332 (rename,
3333 3333 [('A', 'after', None, _('record a rename that has already occurred')),
3334 3334 ('f', 'force', None,
3335 3335 _('forcibly copy over an existing managed file')),
3336 3336 ] + walkopts + dryrunopts,
3337 3337 _('[OPTION]... SOURCE... DEST')),
3338 3338 "resolve":
3339 3339 (resolve,
3340 3340 [('a', 'all', None, _('remerge all unresolved files')),
3341 3341 ('l', 'list', None, _('list state of files needing merge')),
3342 3342 ('m', 'mark', None, _('mark files as resolved')),
3343 3343 ('u', 'unmark', None, _('unmark files as resolved'))],
3344 3344 _('[OPTION]... [FILE]...')),
3345 3345 "revert":
3346 3346 (revert,
3347 3347 [('a', 'all', None, _('revert all changes when no arguments given')),
3348 3348 ('d', 'date', '', _('tipmost revision matching date')),
3349 3349 ('r', 'rev', '', _('revision to revert to')),
3350 3350 ('', 'no-backup', None, _('do not save backup copies of files')),
3351 3351 ] + walkopts + dryrunopts,
3352 3352 _('[OPTION]... [-r REV] [NAME]...')),
3353 3353 "rollback": (rollback, []),
3354 3354 "root": (root, []),
3355 3355 "^serve":
3356 3356 (serve,
3357 3357 [('A', 'accesslog', '', _('name of access log file to write to')),
3358 3358 ('d', 'daemon', None, _('run server in background')),
3359 3359 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3360 3360 ('E', 'errorlog', '', _('name of error log file to write to')),
3361 3361 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3362 3362 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3363 3363 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3364 3364 ('n', 'name', '',
3365 3365 _('name to show in web pages (default: working dir)')),
3366 3366 ('', 'webdir-conf', '', _('name of the webdir config file'
3367 3367 ' (serve more than one repo)')),
3368 3368 ('', 'pid-file', '', _('name of file to write process ID to')),
3369 3369 ('', 'stdio', None, _('for remote clients')),
3370 3370 ('t', 'templates', '', _('web templates to use')),
3371 3371 ('', 'style', '', _('template style to use')),
3372 3372 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3373 3373 ('', 'certificate', '', _('SSL certificate file'))],
3374 3374 _('[OPTION]...')),
3375 3375 "showconfig|debugconfig":
3376 3376 (showconfig,
3377 3377 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3378 3378 _('[-u] [NAME]...')),
3379 3379 "^status|st":
3380 3380 (status,
3381 3381 [('A', 'all', None, _('show status of all files')),
3382 3382 ('m', 'modified', None, _('show only modified files')),
3383 3383 ('a', 'added', None, _('show only added files')),
3384 3384 ('r', 'removed', None, _('show only removed files')),
3385 3385 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3386 3386 ('c', 'clean', None, _('show only files without changes')),
3387 3387 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3388 3388 ('i', 'ignored', None, _('show only ignored files')),
3389 3389 ('n', 'no-status', None, _('hide status prefix')),
3390 3390 ('C', 'copies', None, _('show source of copied files')),
3391 3391 ('0', 'print0', None,
3392 3392 _('end filenames with NUL, for use with xargs')),
3393 3393 ('', 'rev', [], _('show difference from revision')),
3394 3394 ] + walkopts,
3395 3395 _('[OPTION]... [FILE]...')),
3396 3396 "tag":
3397 3397 (tag,
3398 3398 [('f', 'force', None, _('replace existing tag')),
3399 3399 ('l', 'local', None, _('make the tag local')),
3400 3400 ('r', 'rev', '', _('revision to tag')),
3401 3401 ('', 'remove', None, _('remove a tag')),
3402 3402 # -l/--local is already there, commitopts cannot be used
3403 3403 ('m', 'message', '', _('use <text> as commit message')),
3404 3404 ] + commitopts2,
3405 3405 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3406 3406 "tags": (tags, []),
3407 3407 "tip":
3408 3408 (tip,
3409 3409 [('p', 'patch', None, _('show patch')),
3410 3410 ] + templateopts,
3411 3411 _('[-p]')),
3412 3412 "unbundle":
3413 3413 (unbundle,
3414 3414 [('u', 'update', None,
3415 3415 _('update to new tip if changesets were unbundled'))],
3416 3416 _('[-u] FILE...')),
3417 3417 "^update|up|checkout|co":
3418 3418 (update,
3419 3419 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3420 3420 ('d', 'date', '', _('tipmost revision matching date')),
3421 3421 ('r', 'rev', '', _('revision'))],
3422 3422 _('[-C] [-d DATE] [[-r] REV]')),
3423 3423 "verify": (verify, []),
3424 3424 "version": (version_, []),
3425 3425 }
3426 3426
3427 3427 norepo = ("clone init version help debugcomplete debugdata"
3428 3428 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3429 3429 optionalrepo = ("identify paths serve showconfig debugancestor")
@@ -1,806 +1,806 b''
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 import ancestor, bdiff, revlog, util, os, errno
10 import ancestor, bdiff, error, util, os, errno
11 11
12 12 class propertycache(object):
13 13 def __init__(self, func):
14 14 self.func = func
15 15 self.name = func.__name__
16 16 def __get__(self, obj, type=None):
17 17 result = self.func(obj)
18 18 setattr(obj, self.name, result)
19 19 return result
20 20
21 21 class changectx(object):
22 22 """A changecontext object makes access to data related to a particular
23 23 changeset convenient."""
24 24 def __init__(self, repo, changeid=''):
25 25 """changeid is a revision number, node, or tag"""
26 26 if changeid == '':
27 27 changeid = '.'
28 28 self._repo = repo
29 29 if isinstance(changeid, (long, int)):
30 30 self._rev = changeid
31 31 self._node = self._repo.changelog.node(changeid)
32 32 else:
33 33 self._node = self._repo.lookup(changeid)
34 34 self._rev = self._repo.changelog.rev(self._node)
35 35
36 36 def __str__(self):
37 37 return short(self.node())
38 38
39 39 def __int__(self):
40 40 return self.rev()
41 41
42 42 def __repr__(self):
43 43 return "<changectx %s>" % str(self)
44 44
45 45 def __hash__(self):
46 46 try:
47 47 return hash(self._rev)
48 48 except AttributeError:
49 49 return id(self)
50 50
51 51 def __eq__(self, other):
52 52 try:
53 53 return self._rev == other._rev
54 54 except AttributeError:
55 55 return False
56 56
57 57 def __ne__(self, other):
58 58 return not (self == other)
59 59
60 60 def __nonzero__(self):
61 61 return self._rev != nullrev
62 62
63 63 def _changeset(self):
64 64 return self._repo.changelog.read(self.node())
65 65 _changeset = propertycache(_changeset)
66 66
67 67 def _manifest(self):
68 68 return self._repo.manifest.read(self._changeset[0])
69 69 _manifest = propertycache(_manifest)
70 70
71 71 def _manifestdelta(self):
72 72 return self._repo.manifest.readdelta(self._changeset[0])
73 73 _manifestdelta = propertycache(_manifestdelta)
74 74
75 75 def _parents(self):
76 76 p = self._repo.changelog.parentrevs(self._rev)
77 77 if p[1] == nullrev:
78 78 p = p[:-1]
79 79 return [changectx(self._repo, x) for x in p]
80 80 _parents = propertycache(_parents)
81 81
82 82 def __contains__(self, key):
83 83 return key in self._manifest
84 84
85 85 def __getitem__(self, key):
86 86 return self.filectx(key)
87 87
88 88 def __iter__(self):
89 89 for f in util.sort(self._manifest):
90 90 yield f
91 91
92 92 def changeset(self): return self._changeset
93 93 def manifest(self): return self._manifest
94 94
95 95 def rev(self): return self._rev
96 96 def node(self): return self._node
97 97 def hex(self): return hex(self._node)
98 98 def user(self): return self._changeset[1]
99 99 def date(self): return self._changeset[2]
100 100 def files(self): return self._changeset[3]
101 101 def description(self): return self._changeset[4]
102 102 def branch(self): return self._changeset[5].get("branch")
103 103 def extra(self): return self._changeset[5]
104 104 def tags(self): return self._repo.nodetags(self._node)
105 105
106 106 def parents(self):
107 107 """return contexts for each parent changeset"""
108 108 return self._parents
109 109
110 110 def children(self):
111 111 """return contexts for each child changeset"""
112 112 c = self._repo.changelog.children(self._node)
113 113 return [changectx(self._repo, x) for x in c]
114 114
115 115 def ancestors(self):
116 116 for a in self._repo.changelog.ancestors(self._rev):
117 117 yield changectx(self._repo, a)
118 118
119 119 def descendants(self):
120 120 for d in self._repo.changelog.descendants(self._rev):
121 121 yield changectx(self._repo, d)
122 122
123 123 def _fileinfo(self, path):
124 124 if '_manifest' in self.__dict__:
125 125 try:
126 126 return self._manifest[path], self._manifest.flags(path)
127 127 except KeyError:
128 raise revlog.LookupError(self._node, path,
129 _('not found in manifest'))
128 raise error.LookupError(self._node, path,
129 _('not found in manifest'))
130 130 if '_manifestdelta' in self.__dict__ or path in self.files():
131 131 if path in self._manifestdelta:
132 132 return self._manifestdelta[path], self._manifestdelta.flags(path)
133 133 node, flag = self._repo.manifest.find(self._changeset[0], path)
134 134 if not node:
135 raise revlog.LookupError(self._node, path,
136 _('not found in manifest'))
135 raise error.LookupError(self._node, path,
136 _('not found in manifest'))
137 137
138 138 return node, flag
139 139
140 140 def filenode(self, path):
141 141 return self._fileinfo(path)[0]
142 142
143 143 def flags(self, path):
144 144 try:
145 145 return self._fileinfo(path)[1]
146 except revlog.LookupError:
146 except error.LookupError:
147 147 return ''
148 148
149 149 def filectx(self, path, fileid=None, filelog=None):
150 150 """get a file context from this changeset"""
151 151 if fileid is None:
152 152 fileid = self.filenode(path)
153 153 return filectx(self._repo, path, fileid=fileid,
154 154 changectx=self, filelog=filelog)
155 155
156 156 def ancestor(self, c2):
157 157 """
158 158 return the ancestor context of self and c2
159 159 """
160 160 n = self._repo.changelog.ancestor(self._node, c2._node)
161 161 return changectx(self._repo, n)
162 162
163 163 def walk(self, match):
164 164 fdict = dict.fromkeys(match.files())
165 165 # for dirstate.walk, files=['.'] means "walk the whole tree".
166 166 # follow that here, too
167 167 fdict.pop('.', None)
168 168 for fn in self:
169 169 for ffn in fdict:
170 170 # match if the file is the exact name or a directory
171 171 if ffn == fn or fn.startswith("%s/" % ffn):
172 172 del fdict[ffn]
173 173 break
174 174 if match(fn):
175 175 yield fn
176 176 for fn in util.sort(fdict):
177 177 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
178 178 yield fn
179 179
180 180 class filectx(object):
181 181 """A filecontext object makes access to data related to a particular
182 182 filerevision convenient."""
183 183 def __init__(self, repo, path, changeid=None, fileid=None,
184 184 filelog=None, changectx=None):
185 185 """changeid can be a changeset revision, node, or tag.
186 186 fileid can be a file revision or node."""
187 187 self._repo = repo
188 188 self._path = path
189 189
190 190 assert (changeid is not None
191 191 or fileid is not None
192 192 or changectx is not None)
193 193
194 194 if filelog:
195 195 self._filelog = filelog
196 196
197 197 if changeid is not None:
198 198 self._changeid = changeid
199 199 if changectx is not None:
200 200 self._changectx = changectx
201 201 if fileid is not None:
202 202 self._fileid = fileid
203 203
204 204 def _changectx(self):
205 205 return changectx(self._repo, self._changeid)
206 206 _changectx = propertycache(_changectx)
207 207
208 208 def _filelog(self):
209 209 return self._repo.file(self._path)
210 210 _filelog = propertycache(_filelog)
211 211
212 212 def _changeid(self):
213 213 if '_changectx' in self.__dict__:
214 214 return self._changectx.rev()
215 215 else:
216 216 return self._filelog.linkrev(self._filerev)
217 217 _changeid = propertycache(_changeid)
218 218
219 219 def _filenode(self):
220 220 if '_fileid' in self.__dict__:
221 221 return self._filelog.lookup(self._fileid)
222 222 else:
223 223 return self._changectx.filenode(self._path)
224 224 _filenode = propertycache(_filenode)
225 225
226 226 def _filerev(self):
227 227 return self._filelog.rev(self._filenode)
228 228 _filerev = propertycache(_filerev)
229 229
230 230 def _repopath(self):
231 231 return self._path
232 232 _repopath = propertycache(_repopath)
233 233
234 234 def __nonzero__(self):
235 235 try:
236 236 n = self._filenode
237 237 return True
238 except revlog.LookupError:
238 except error.LookupError:
239 239 # file is missing
240 240 return False
241 241
242 242 def __str__(self):
243 243 return "%s@%s" % (self.path(), short(self.node()))
244 244
245 245 def __repr__(self):
246 246 return "<filectx %s>" % str(self)
247 247
248 248 def __hash__(self):
249 249 try:
250 250 return hash((self._path, self._fileid))
251 251 except AttributeError:
252 252 return id(self)
253 253
254 254 def __eq__(self, other):
255 255 try:
256 256 return (self._path == other._path
257 257 and self._fileid == other._fileid)
258 258 except AttributeError:
259 259 return False
260 260
261 261 def __ne__(self, other):
262 262 return not (self == other)
263 263
264 264 def filectx(self, fileid):
265 265 '''opens an arbitrary revision of the file without
266 266 opening a new filelog'''
267 267 return filectx(self._repo, self._path, fileid=fileid,
268 268 filelog=self._filelog)
269 269
270 270 def filerev(self): return self._filerev
271 271 def filenode(self): return self._filenode
272 272 def flags(self): return self._changectx.flags(self._path)
273 273 def filelog(self): return self._filelog
274 274
275 275 def rev(self):
276 276 if '_changectx' in self.__dict__:
277 277 return self._changectx.rev()
278 278 if '_changeid' in self.__dict__:
279 279 return self._changectx.rev()
280 280 return self._filelog.linkrev(self._filerev)
281 281
282 282 def linkrev(self): return self._filelog.linkrev(self._filerev)
283 283 def node(self): return self._changectx.node()
284 284 def user(self): return self._changectx.user()
285 285 def date(self): return self._changectx.date()
286 286 def files(self): return self._changectx.files()
287 287 def description(self): return self._changectx.description()
288 288 def branch(self): return self._changectx.branch()
289 289 def manifest(self): return self._changectx.manifest()
290 290 def changectx(self): return self._changectx
291 291
292 292 def data(self): return self._filelog.read(self._filenode)
293 293 def path(self): return self._path
294 294 def size(self): return self._filelog.size(self._filerev)
295 295
296 296 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
297 297
298 298 def renamed(self):
299 299 """check if file was actually renamed in this changeset revision
300 300
301 301 If rename logged in file revision, we report copy for changeset only
302 302 if file revisions linkrev points back to the changeset in question
303 303 or both changeset parents contain different file revisions.
304 304 """
305 305
306 306 renamed = self._filelog.renamed(self._filenode)
307 307 if not renamed:
308 308 return renamed
309 309
310 310 if self.rev() == self.linkrev():
311 311 return renamed
312 312
313 313 name = self.path()
314 314 fnode = self._filenode
315 315 for p in self._changectx.parents():
316 316 try:
317 317 if fnode == p.filenode(name):
318 318 return None
319 except revlog.LookupError:
319 except error.LookupError:
320 320 pass
321 321 return renamed
322 322
323 323 def parents(self):
324 324 p = self._path
325 325 fl = self._filelog
326 326 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
327 327
328 328 r = self._filelog.renamed(self._filenode)
329 329 if r:
330 330 pl[0] = (r[0], r[1], None)
331 331
332 332 return [filectx(self._repo, p, fileid=n, filelog=l)
333 333 for p,n,l in pl if n != nullid]
334 334
335 335 def children(self):
336 336 # hard for renames
337 337 c = self._filelog.children(self._filenode)
338 338 return [filectx(self._repo, self._path, fileid=x,
339 339 filelog=self._filelog) for x in c]
340 340
341 341 def annotate(self, follow=False, linenumber=None):
342 342 '''returns a list of tuples of (ctx, line) for each line
343 343 in the file, where ctx is the filectx of the node where
344 344 that line was last changed.
345 345 This returns tuples of ((ctx, linenumber), line) for each line,
346 346 if "linenumber" parameter is NOT "None".
347 347 In such tuples, linenumber means one at the first appearance
348 348 in the managed file.
349 349 To reduce annotation cost,
350 350 this returns fixed value(False is used) as linenumber,
351 351 if "linenumber" parameter is "False".'''
352 352
353 353 def decorate_compat(text, rev):
354 354 return ([rev] * len(text.splitlines()), text)
355 355
356 356 def without_linenumber(text, rev):
357 357 return ([(rev, False)] * len(text.splitlines()), text)
358 358
359 359 def with_linenumber(text, rev):
360 360 size = len(text.splitlines())
361 361 return ([(rev, i) for i in xrange(1, size + 1)], text)
362 362
363 363 decorate = (((linenumber is None) and decorate_compat) or
364 364 (linenumber and with_linenumber) or
365 365 without_linenumber)
366 366
367 367 def pair(parent, child):
368 368 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
369 369 child[0][b1:b2] = parent[0][a1:a2]
370 370 return child
371 371
372 372 getlog = util.cachefunc(lambda x: self._repo.file(x))
373 373 def getctx(path, fileid):
374 374 log = path == self._path and self._filelog or getlog(path)
375 375 return filectx(self._repo, path, fileid=fileid, filelog=log)
376 376 getctx = util.cachefunc(getctx)
377 377
378 378 def parents(f):
379 379 # we want to reuse filectx objects as much as possible
380 380 p = f._path
381 381 if f._filerev is None: # working dir
382 382 pl = [(n.path(), n.filerev()) for n in f.parents()]
383 383 else:
384 384 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
385 385
386 386 if follow:
387 387 r = f.renamed()
388 388 if r:
389 389 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
390 390
391 391 return [getctx(p, n) for p, n in pl if n != nullrev]
392 392
393 393 # use linkrev to find the first changeset where self appeared
394 394 if self.rev() != self.linkrev():
395 395 base = self.filectx(self.filerev())
396 396 else:
397 397 base = self
398 398
399 399 # find all ancestors
400 400 needed = {base: 1}
401 401 visit = [base]
402 402 files = [base._path]
403 403 while visit:
404 404 f = visit.pop(0)
405 405 for p in parents(f):
406 406 if p not in needed:
407 407 needed[p] = 1
408 408 visit.append(p)
409 409 if p._path not in files:
410 410 files.append(p._path)
411 411 else:
412 412 # count how many times we'll use this
413 413 needed[p] += 1
414 414
415 415 # sort by revision (per file) which is a topological order
416 416 visit = []
417 417 for f in files:
418 418 fn = [(n.rev(), n) for n in needed if n._path == f]
419 419 visit.extend(fn)
420 420
421 421 hist = {}
422 422 for r, f in util.sort(visit):
423 423 curr = decorate(f.data(), f)
424 424 for p in parents(f):
425 425 if p != nullid:
426 426 curr = pair(hist[p], curr)
427 427 # trim the history of unneeded revs
428 428 needed[p] -= 1
429 429 if not needed[p]:
430 430 del hist[p]
431 431 hist[f] = curr
432 432
433 433 return zip(hist[f][0], hist[f][1].splitlines(1))
434 434
435 435 def ancestor(self, fc2):
436 436 """
437 437 find the common ancestor file context, if any, of self, and fc2
438 438 """
439 439
440 440 acache = {}
441 441
442 442 # prime the ancestor cache for the working directory
443 443 for c in (self, fc2):
444 444 if c._filerev == None:
445 445 pl = [(n.path(), n.filenode()) for n in c.parents()]
446 446 acache[(c._path, None)] = pl
447 447
448 448 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
449 449 def parents(vertex):
450 450 if vertex in acache:
451 451 return acache[vertex]
452 452 f, n = vertex
453 453 if f not in flcache:
454 454 flcache[f] = self._repo.file(f)
455 455 fl = flcache[f]
456 456 pl = [(f, p) for p in fl.parents(n) if p != nullid]
457 457 re = fl.renamed(n)
458 458 if re:
459 459 pl.append(re)
460 460 acache[vertex] = pl
461 461 return pl
462 462
463 463 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
464 464 v = ancestor.ancestor(a, b, parents)
465 465 if v:
466 466 f, n = v
467 467 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
468 468
469 469 return None
470 470
471 471 class workingctx(changectx):
472 472 """A workingctx object makes access to data related to
473 473 the current working directory convenient.
474 474 parents - a pair of parent nodeids, or None to use the dirstate.
475 475 date - any valid date string or (unixtime, offset), or None.
476 476 user - username string, or None.
477 477 extra - a dictionary of extra values, or None.
478 478 changes - a list of file lists as returned by localrepo.status()
479 479 or None to use the repository status.
480 480 """
481 481 def __init__(self, repo, parents=None, text="", user=None, date=None,
482 482 extra=None, changes=None):
483 483 self._repo = repo
484 484 self._rev = None
485 485 self._node = None
486 486 self._text = text
487 487 if date:
488 488 self._date = util.parsedate(date)
489 489 if user:
490 490 self._user = user
491 491 if parents:
492 492 self._parents = [changectx(self._repo, p) for p in parents]
493 493 if changes:
494 494 self._status = list(changes)
495 495
496 496 self._extra = {}
497 497 if extra:
498 498 self._extra = extra.copy()
499 499 if 'branch' not in self._extra:
500 500 branch = self._repo.dirstate.branch()
501 501 try:
502 502 branch = branch.decode('UTF-8').encode('UTF-8')
503 503 except UnicodeDecodeError:
504 504 raise util.Abort(_('branch name not in UTF-8!'))
505 505 self._extra['branch'] = branch
506 506 if self._extra['branch'] == '':
507 507 self._extra['branch'] = 'default'
508 508
509 509 def __str__(self):
510 510 return str(self._parents[0]) + "+"
511 511
512 512 def __nonzero__(self):
513 513 return True
514 514
515 515 def __contains__(self, key):
516 516 return self._dirstate[key] not in "?r"
517 517
518 518 def _manifest(self):
519 519 """generate a manifest corresponding to the working directory"""
520 520
521 521 man = self._parents[0].manifest().copy()
522 522 copied = self._repo.dirstate.copies()
523 523 cf = lambda x: man.flags(copied.get(x, x))
524 524 ff = self._repo.dirstate.flagfunc(cf)
525 525 modified, added, removed, deleted, unknown = self._status[:5]
526 526 for i, l in (("a", added), ("m", modified), ("u", unknown)):
527 527 for f in l:
528 528 man[f] = man.get(copied.get(f, f), nullid) + i
529 529 try:
530 530 man.set(f, ff(f))
531 531 except OSError:
532 532 pass
533 533
534 534 for f in deleted + removed:
535 535 if f in man:
536 536 del man[f]
537 537
538 538 return man
539 539 _manifest = propertycache(_manifest)
540 540
541 541 def _status(self):
542 542 return self._repo.status(unknown=True)
543 543 _status = propertycache(_status)
544 544
545 545 def _user(self):
546 546 return self._repo.ui.username()
547 547 _user = propertycache(_user)
548 548
549 549 def _date(self):
550 550 return util.makedate()
551 551 _date = propertycache(_date)
552 552
553 553 def _parents(self):
554 554 p = self._repo.dirstate.parents()
555 555 if p[1] == nullid:
556 556 p = p[:-1]
557 557 self._parents = [changectx(self._repo, x) for x in p]
558 558 return self._parents
559 559 _parents = propertycache(_parents)
560 560
561 561 def manifest(self): return self._manifest
562 562
563 563 def user(self): return self._user or self._repo.ui.username()
564 564 def date(self): return self._date
565 565 def description(self): return self._text
566 566 def files(self):
567 567 return util.sort(self._status[0] + self._status[1] + self._status[2])
568 568
569 569 def modified(self): return self._status[0]
570 570 def added(self): return self._status[1]
571 571 def removed(self): return self._status[2]
572 572 def deleted(self): return self._status[3]
573 573 def unknown(self): return self._status[4]
574 574 def clean(self): return self._status[5]
575 575 def branch(self): return self._extra['branch']
576 576 def extra(self): return self._extra
577 577
578 578 def tags(self):
579 579 t = []
580 580 [t.extend(p.tags()) for p in self.parents()]
581 581 return t
582 582
583 583 def children(self):
584 584 return []
585 585
586 586 def flags(self, path):
587 587 if '_manifest' in self.__dict__:
588 588 try:
589 589 return self._manifest.flags(path)
590 590 except KeyError:
591 591 return ''
592 592
593 593 pnode = self._parents[0].changeset()[0]
594 594 orig = self._repo.dirstate.copies().get(path, path)
595 595 node, flag = self._repo.manifest.find(pnode, orig)
596 596 try:
597 597 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
598 598 return ff(path)
599 599 except OSError:
600 600 pass
601 601
602 602 if not node or path in self.deleted() or path in self.removed():
603 603 return ''
604 604 return flag
605 605
606 606 def filectx(self, path, filelog=None):
607 607 """get a file context from the working directory"""
608 608 return workingfilectx(self._repo, path, workingctx=self,
609 609 filelog=filelog)
610 610
611 611 def ancestor(self, c2):
612 612 """return the ancestor context of self and c2"""
613 613 return self._parents[0].ancestor(c2) # punt on two parents for now
614 614
615 615 def walk(self, match):
616 616 return util.sort(self._repo.dirstate.walk(match, True, False).keys())
617 617
618 618 class workingfilectx(filectx):
619 619 """A workingfilectx object makes access to data related to a particular
620 620 file in the working directory convenient."""
621 621 def __init__(self, repo, path, filelog=None, workingctx=None):
622 622 """changeid can be a changeset revision, node, or tag.
623 623 fileid can be a file revision or node."""
624 624 self._repo = repo
625 625 self._path = path
626 626 self._changeid = None
627 627 self._filerev = self._filenode = None
628 628
629 629 if filelog:
630 630 self._filelog = filelog
631 631 if workingctx:
632 632 self._changectx = workingctx
633 633
634 634 def _changectx(self):
635 635 return workingctx(self._repo)
636 636 _changectx = propertycache(_changectx)
637 637
638 638 def _repopath(self):
639 639 return self._repo.dirstate.copied(self._path) or self._path
640 640 _repopath = propertycache(_repopath)
641 641
642 642 def _filelog(self):
643 643 return self._repo.file(self._repopath)
644 644 _filelog = propertycache(_filelog)
645 645
646 646 def __nonzero__(self):
647 647 return True
648 648
649 649 def __str__(self):
650 650 return "%s@%s" % (self.path(), self._changectx)
651 651
652 652 def filectx(self, fileid):
653 653 '''opens an arbitrary revision of the file without
654 654 opening a new filelog'''
655 655 return filectx(self._repo, self._repopath, fileid=fileid,
656 656 filelog=self._filelog)
657 657
658 658 def rev(self):
659 659 if '_changectx' in self.__dict__:
660 660 return self._changectx.rev()
661 661 return self._filelog.linkrev(self._filerev)
662 662
663 663 def data(self): return self._repo.wread(self._path)
664 664 def renamed(self):
665 665 rp = self._repopath
666 666 if rp == self._path:
667 667 return None
668 668 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
669 669
670 670 def parents(self):
671 671 '''return parent filectxs, following copies if necessary'''
672 672 p = self._path
673 673 rp = self._repopath
674 674 pcl = self._changectx._parents
675 675 fl = self._filelog
676 676 pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
677 677 if len(pcl) > 1:
678 678 if rp != p:
679 679 fl = None
680 680 pl.append((p, pcl[1]._manifest.get(p, nullid), fl))
681 681
682 682 return [filectx(self._repo, p, fileid=n, filelog=l)
683 683 for p,n,l in pl if n != nullid]
684 684
685 685 def children(self):
686 686 return []
687 687
688 688 def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
689 689 def date(self):
690 690 t, tz = self._changectx.date()
691 691 try:
692 692 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
693 693 except OSError, err:
694 694 if err.errno != errno.ENOENT: raise
695 695 return (t, tz)
696 696
697 697 def cmp(self, text): return self._repo.wread(self._path) == text
698 698
699 699 class memctx(object):
700 700 """Use memctx to perform in-memory commits via localrepo.commitctx().
701 701
702 702 Revision information is supplied at initialization time while
703 703 related files data and is made available through a callback
704 704 mechanism. 'repo' is the current localrepo, 'parents' is a
705 705 sequence of two parent revisions identifiers (pass None for every
706 706 missing parent), 'text' is the commit message and 'files' lists
707 707 names of files touched by the revision (normalized and relative to
708 708 repository root).
709 709
710 710 filectxfn(repo, memctx, path) is a callable receiving the
711 711 repository, the current memctx object and the normalized path of
712 712 requested file, relative to repository root. It is fired by the
713 713 commit function for every file in 'files', but calls order is
714 714 undefined. If the file is available in the revision being
715 715 committed (updated or added), filectxfn returns a memfilectx
716 716 object. If the file was removed, filectxfn raises an
717 717 IOError. Moved files are represented by marking the source file
718 718 removed and the new file added with copy information (see
719 719 memfilectx).
720 720
721 721 user receives the committer name and defaults to current
722 722 repository username, date is the commit date in any format
723 723 supported by util.parsedate() and defaults to current date, extra
724 724 is a dictionary of metadata or is left empty.
725 725 """
726 726 def __init__(self, repo, parents, text, files, filectxfn, user=None,
727 727 date=None, extra=None):
728 728 self._repo = repo
729 729 self._rev = None
730 730 self._node = None
731 731 self._text = text
732 732 self._date = date and util.parsedate(date) or util.makedate()
733 733 self._user = user
734 734 parents = [(p or nullid) for p in parents]
735 735 p1, p2 = parents
736 736 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
737 737 files = util.sort(util.unique(files))
738 738 self._status = [files, [], [], [], []]
739 739 self._filectxfn = filectxfn
740 740
741 741 self._extra = extra and extra.copy() or {}
742 742 if 'branch' not in self._extra:
743 743 self._extra['branch'] = 'default'
744 744 elif self._extra.get('branch') == '':
745 745 self._extra['branch'] = 'default'
746 746
747 747 def __str__(self):
748 748 return str(self._parents[0]) + "+"
749 749
750 750 def __int__(self):
751 751 return self._rev
752 752
753 753 def __nonzero__(self):
754 754 return True
755 755
756 756 def user(self): return self._user or self._repo.ui.username()
757 757 def date(self): return self._date
758 758 def description(self): return self._text
759 759 def files(self): return self.modified()
760 760 def modified(self): return self._status[0]
761 761 def added(self): return self._status[1]
762 762 def removed(self): return self._status[2]
763 763 def deleted(self): return self._status[3]
764 764 def unknown(self): return self._status[4]
765 765 def clean(self): return self._status[5]
766 766 def branch(self): return self._extra['branch']
767 767 def extra(self): return self._extra
768 768 def flags(self, f): return self[f].flags()
769 769
770 770 def parents(self):
771 771 """return contexts for each parent changeset"""
772 772 return self._parents
773 773
774 774 def filectx(self, path, filelog=None):
775 775 """get a file context from the working directory"""
776 776 return self._filectxfn(self._repo, self, path)
777 777
778 778 class memfilectx(object):
779 779 """memfilectx represents an in-memory file to commit.
780 780
781 781 See memctx for more details.
782 782 """
783 783 def __init__(self, path, data, islink, isexec, copied):
784 784 """
785 785 path is the normalized file path relative to repository root.
786 786 data is the file content as a string.
787 787 islink is True if the file is a symbolic link.
788 788 isexec is True if the file is executable.
789 789 copied is the source file path if current file was copied in the
790 790 revision being committed, or None."""
791 791 self._path = path
792 792 self._data = data
793 793 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
794 794 self._copied = None
795 795 if copied:
796 796 self._copied = (copied, nullid)
797 797
798 798 def __nonzero__(self): return True
799 799 def __str__(self): return "%s@%s" % (self.path(), self._changectx)
800 800 def path(self): return self._path
801 801 def data(self): return self._data
802 802 def flags(self): return self._flags
803 803 def isexec(self): return 'x' in self._flags
804 804 def islink(self): return 'l' in self._flags
805 805 def renamed(self): return self._copied
806 806
@@ -1,416 +1,416 b''
1 1 # dispatch.py - command dispatching for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 from repo import RepoError
10 10 import os, sys, atexit, signal, pdb, socket, errno, shlex, time
11 import util, commands, hg, lock, fancyopts, revlog, extensions, hook
11 import util, commands, hg, lock, fancyopts, extensions, hook, error
12 12 import cmdutil
13 13 import ui as _ui
14 14
class ParseError(Exception):
    """Raised when parsing of the command line fails."""
17 17
def run():
    """Dispatch the command found in sys.argv and exit with its status."""
    status = dispatch(sys.argv[1:])
    sys.exit(status)
21 21
def dispatch(args):
    "run the command specified in args"
    try:
        # build the ui first so --traceback is honored even for errors
        # raised while constructing it; Abort here means we cannot even
        # report through the ui, hence the bare stderr write
        u = _ui.ui(traceback='--traceback' in args)
    except util.Abort, inst:
        sys.stderr.write(_("abort: %s\n") % inst)
        return -1
    return _runcatch(u, args)
30 30
def _runcatch(ui, args):
    """Run _dispatch(ui, args), translating known exceptions into
    user-facing messages; returns the command result or -1 when a
    handled error occurred.

    NOTE: the order of the except clauses is significant — more
    specific exceptions must appear before their base classes
    (e.g. socket.error before IOError)."""
    def catchterm(*args):
        raise util.SignalInterrupt

    # convert termination signals into SignalInterrupt so cleanup runs
    for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
        num = getattr(signal, name, None)
        if num: signal.signal(num, catchterm)

    try:
        try:
            # enter the debugger before command execution
            if '--debugger' in args:
                pdb.set_trace()
            try:
                return _dispatch(ui, args)
            finally:
                ui.flush()
        except:
            # enter the debugger when we hit an exception
            if '--debugger' in args:
                pdb.post_mortem(sys.exc_info()[2])
            ui.print_exc()
            raise

    except ParseError, inst:
        if inst.args[0]:
            ui.warn(_("hg %s: %s\n") % (inst.args[0], inst.args[1]))
            commands.help_(ui, inst.args[0])
        else:
            ui.warn(_("hg: %s\n") % inst.args[1])
            commands.help_(ui, 'shortlist')
    except cmdutil.AmbiguousCommand, inst:
        ui.warn(_("hg: command '%s' is ambiguous:\n %s\n") %
                (inst.args[0], " ".join(inst.args[1])))
    except cmdutil.UnknownCommand, inst:
        ui.warn(_("hg: unknown command '%s'\n") % inst.args[0])
        commands.help_(ui, 'shortlist')
    except RepoError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except lock.LockHeld, inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %s') % inst.locker
        else:
            reason = _('lock held by %s') % inst.locker
        ui.warn(_("abort: %s: %s\n") % (inst.desc or inst.filename, reason))
    except lock.LockUnavailable, inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or inst.filename, inst.strerror))
    except error.RevlogError, inst:
        ui.warn(_("abort: %s!\n") % inst)
    except util.SignalInterrupt:
        ui.warn(_("killed!\n"))
    except KeyboardInterrupt:
        try:
            ui.warn(_("interrupted!\n"))
        except IOError, inst:
            # writing the message itself may hit a closed pipe
            if inst.errno == errno.EPIPE:
                if ui.debugflag:
                    ui.warn(_("\nbroken pipe\n"))
            else:
                raise
    except socket.error, inst:
        ui.warn(_("abort: %s\n") % inst.args[-1])
    except IOError, inst:
        # distinguish urllib-style HTTP errors (code/reason attributes)
        # from plain file I/O errors
        if hasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % inst)
        elif hasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except: # it might be anything, for example a string
                reason = inst.reason
            ui.warn(_("abort: error: %s\n") % reason)
        elif hasattr(inst, "args") and inst.args[0] == errno.EPIPE:
            if ui.debugflag:
                ui.warn(_("broken pipe\n"))
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
            else:
                ui.warn(_("abort: %s\n") % inst.strerror)
        else:
            raise
    except OSError, inst:
        if getattr(inst, "filename", None):
            ui.warn(_("abort: %s: %s\n") % (inst.strerror, inst.filename))
        else:
            ui.warn(_("abort: %s\n") % inst.strerror)
    except util.UnexpectedOutput, inst:
        ui.warn(_("abort: %s") % inst.args[0])
        if not isinstance(inst.args[1], basestring):
            ui.warn(" %r\n" % (inst.args[1],))
        elif not inst.args[1]:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % util.ellipsis(inst.args[1]))
    except ImportError, inst:
        m = str(inst).split()[-1]
        ui.warn(_("abort: could not import module %s!\n") % m)
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))

    except util.Abort, inst:
        ui.warn(_("abort: %s\n") % inst)
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit, inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case catch this and and pass exit code to caller.
        return inst.code
    except:
        # truly unexpected: print bug-report banner and re-raise so the
        # traceback reaches the user
        ui.warn(_("** unknown exception encountered, details follow\n"))
        ui.warn(_("** report bug details to "
                  "http://www.selenic.com/mercurial/bts\n"))
        ui.warn(_("** or mercurial@selenic.com\n"))
        ui.warn(_("** Mercurial Distributed SCM (version %s)\n")
                % util.version())
        ui.warn(_("** Extensions loaded: %s\n")
                % ", ".join([x[0] for x in extensions.extensions()]))
        raise

    return -1
154 154
155 155 def _findrepo(p):
156 156 while not os.path.isdir(os.path.join(p, ".hg")):
157 157 oldp, p = p, os.path.dirname(p)
158 158 if p == oldp:
159 159 return None
160 160
161 161 return p
162 162
def _parse(ui, args):
    """Parse the full command line into
    (cmd, func, args, globalopts, cmdopts).

    Global options are parsed first, the command name is resolved
    through cmdutil.findcmd, command defaults from [defaults] are
    prepended, then the remaining args are parsed against the combined
    option table."""
    options = {}
    cmdoptions = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except fancyopts.getopt.GetoptError, inst:
        # no command known yet at this point
        raise ParseError(None, inst)

    if args:
        cmd, args = args[0], args[1:]
        aliases, i = cmdutil.findcmd(cmd, commands.table,
                                     ui.config("ui", "strict"))
        cmd = aliases[0]
        # per-command defaults from the [defaults] config section
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = shlex.split(defaults) + args
        c = list(i[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions)
    except fancyopts.getopt.GetoptError, inst:
        raise ParseError(cmd, inst)

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and i[0] or None, args, options, cmdoptions)
201 201
202 202 def _parseconfig(config):
203 203 """parse the --config options from the command line"""
204 204 parsed = []
205 205 for cfg in config:
206 206 try:
207 207 name, value = cfg.split('=', 1)
208 208 section, name = name.split('.', 1)
209 209 if not section or not name:
210 210 raise IndexError
211 211 parsed.append((section, name, value))
212 212 except (IndexError, ValueError):
213 213 raise util.Abort(_('malformed --config option: %s') % cfg)
214 214 return parsed
215 215
216 216 def _earlygetopt(aliases, args):
217 217 """Return list of values for an option (or aliases).
218 218
219 219 The values are listed in the order they appear in args.
220 220 The options and values are removed from args.
221 221 """
222 222 try:
223 223 argcount = args.index("--")
224 224 except ValueError:
225 225 argcount = len(args)
226 226 shortopts = [opt for opt in aliases if len(opt) == 2]
227 227 values = []
228 228 pos = 0
229 229 while pos < argcount:
230 230 if args[pos] in aliases:
231 231 if pos + 1 >= argcount:
232 232 # ignore and let getopt report an error if there is no value
233 233 break
234 234 del args[pos]
235 235 values.append(args.pop(pos))
236 236 argcount -= 2
237 237 elif args[pos][:2] in shortopts:
238 238 # short option can have no following space, e.g. hg log -Rfoo
239 239 values.append(args.pop(pos)[2:])
240 240 argcount -= 1
241 241 else:
242 242 pos += 1
243 243 return values
244 244
# names of extensions that have already been set up in this process
_loaded = {}
def _dispatch(ui, args):
    """Core dispatch: handle early options, load extensions, parse the
    command line, open the repository if needed, and run the command
    wrapped in its pre-/post- hooks."""
    # read --config before doing anything else
    # (e.g. to change trust settings for reading .hg/hgrc)
    config = _earlygetopt(['--config'], args)
    if config:
        ui.updateopts(config=_parseconfig(config))

    # check for cwd
    cwd = _earlygetopt(['--cwd'], args)
    if cwd:
        os.chdir(cwd[-1])

    # read the local repository .hgrc into a local ui object
    path = _findrepo(os.getcwd()) or ""
    if not path:
        lui = ui
    if path:
        try:
            lui = _ui.ui(parentui=ui)
            lui.readconfig(os.path.join(path, ".hg", "hgrc"))
        except IOError:
            pass

    # now we can expand paths, even ones in .hg/hgrc
    rpath = _earlygetopt(["-R", "--repository", "--repo"], args)
    if rpath:
        path = lui.expandpath(rpath[-1])
        lui = _ui.ui(parentui=ui)
        lui.readconfig(os.path.join(path, ".hg", "hgrc"))

    extensions.loadall(lui)
    for name, module in extensions.extensions():
        if name in _loaded:
            continue

        # setup extensions
        # TODO this should be generalized to scheme, where extensions can
        # redepend on other extensions. then we should toposort them, and
        # do initialization in correct order
        extsetup = getattr(module, 'extsetup', None)
        if extsetup:
            extsetup()

        cmdtable = getattr(module, 'cmdtable', {})
        overrides = [cmd for cmd in cmdtable if cmd in commands.table]
        if overrides:
            ui.warn(_("extension '%s' overrides commands: %s\n")
                    % (name, " ".join(overrides)))
        commands.table.update(cmdtable)
        _loaded[name] = 1
    # check for fallback encoding
    fallback = lui.config('ui', 'fallbackencoding')
    if fallback:
        util._fallbackencoding = fallback

    # keep the unparsed argument list for hooks and -R re-dispatch
    fullargs = args
    cmd, func, args, options, cmdoptions = _parse(lui, args)

    # these options were consumed by _earlygetopt above; if they survive
    # parsing they were abbreviated or mis-placed
    if options["config"]:
        raise util.Abort(_("Option --config may not be abbreviated!"))
    if options["cwd"]:
        raise util.Abort(_("Option --cwd may not be abbreviated!"))
    if options["repository"]:
        raise util.Abort(_(
            "Option -R has to be separated from other options (i.e. not -qR) "
            "and --repository may only be abbreviated as --repo!"))

    if options["encoding"]:
        util._encoding = options["encoding"]
    if options["encodingmode"]:
        util._encodingmode = options["encodingmode"]
    if options["time"]:
        def get_times():
            t = os.times()
            if t[4] == 0.0: # Windows leaves this as zero, so use time.clock()
                t = (t[0], t[1], t[2], t[3], time.clock())
            return t
        s = get_times()
        def print_time():
            t = get_times()
            ui.warn(_("Time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
                    (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
        atexit.register(print_time)

    ui.updateopts(options["verbose"], options["debug"], options["quiet"],
                  not options["noninteractive"], options["traceback"])

    if options['help']:
        return commands.help_(ui, cmd, options['version'])
    elif options['version']:
        return commands.version_(ui)
    elif not cmd:
        return commands.help_(ui, 'shortlist')

    repo = None
    if cmd not in commands.norepo.split():
        try:
            repo = hg.repository(ui, path=path)
            ui = repo.ui
            if not repo.local():
                raise util.Abort(_("repository '%s' is not local") % path)
            ui.setconfig("bundle", "mainreporoot", repo.root)
        except RepoError:
            if cmd not in commands.optionalrepo.split():
                if args and not path: # try to infer -R from command args
                    repos = map(_findrepo, args)
                    guess = repos[0]
                    if guess and repos.count(guess) == len(repos):
                        return _dispatch(ui, ['--repository', guess] + fullargs)
                if not path:
                    raise RepoError(_("There is no Mercurial repository here"
                                      " (.hg not found)"))
                raise
        args.insert(0, repo)

    d = lambda: util.checksignature(func)(ui, *args, **cmdoptions)

    # run pre-hook, and abort if it fails
    ret = hook.hook(lui, repo, "pre-%s" % cmd, False, args=" ".join(fullargs))
    if ret:
        return ret
    ret = _runcommand(ui, options, cmd, d)
    # run post-hook, passing command result
    hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
              result = ret)
    return ret
372 372
def _runcommand(ui, options, cmd, cmdfunc):
    """Invoke cmdfunc, optionally under the hotshot (--profile) or
    lsprof (--lsprof) profiler; bad call signatures become ParseError."""
    def checkargs():
        try:
            return cmdfunc()
        except util.SignatureError:
            raise ParseError(cmd, _("invalid arguments"))

    if options['profile']:
        import hotshot, hotshot.stats
        prof = hotshot.Profile("hg.prof")
        try:
            try:
                return prof.runcall(checkargs)
            except:
                try:
                    ui.warn(_('exception raised - generating '
                              'profile anyway\n'))
                except:
                    pass
                raise
        finally:
            # stats are printed even when the command failed
            prof.close()
            stats = hotshot.stats.load("hg.prof")
            stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(40)
    elif options['lsprof']:
        try:
            from mercurial import lsprof
        except ImportError:
            raise util.Abort(_(
                'lsprof not available - install from '
                'http://codespeak.net/svn/user/arigo/hack/misc/lsprof/'))
        p = lsprof.Profiler()
        p.enable(subcalls=True)
        try:
            return checkargs()
        finally:
            p.disable()
            stats = lsprof.Stats(p.getstats())
            stats.sort()
            stats.pprint(top=10, file=sys.stderr, climit=5)
    else:
        return checkargs()
@@ -1,314 +1,314 b''
1 1 # hgweb/hgweb_mod.py - Web interface for a repository.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, mimetypes
10 10 from mercurial.node import hex, nullid
11 11 from mercurial.repo import RepoError
12 from mercurial import ui, hg, util, hook
13 from mercurial import revlog, templater, templatefilters
12 from mercurial import ui, hg, util, hook, error
13 from mercurial import templater, templatefilters
14 14 from common import get_mtime, style_map, ErrorResponse
15 15 from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR
16 16 from common import HTTP_UNAUTHORIZED, HTTP_METHOD_NOT_ALLOWED
17 17 from request import wsgirequest
18 18 import webcommands, protocol, webutil
19 19
20 20 perms = {
21 21 'changegroup': 'pull',
22 22 'changegroupsubset': 'pull',
23 23 'unbundle': 'push',
24 24 'stream_out': 'pull',
25 25 }
26 26
27 27 class hgweb(object):
    def __init__(self, repo, name=None):
        """repo: a repository object or a path string to open one;
        name: optional display name for the repository."""
        if isinstance(repo, str):
            # opened from a path: build a locked-down ui first
            parentui = ui.ui(report_untrusted=False, interactive=False)
            self.repo = hg.repository(parentui, repo)
        else:
            self.repo = repo

        hook.redirect(True)
        # -1 forces the first refresh() to reload everything
        self.mtime = -1
        self.reponame = name
        self.archives = 'zip', 'gz', 'bz2'
        self.stripecount = 1
        # a repo owner may set web.templates in .hg/hgrc to get any file
        # readable by the user running the CGI script
        self.templatepath = self.config("web", "templates",
                                        templater.templatepath(),
                                        untrusted=False)
45 45
    # The CGI scripts are often run by a user different from the repo owner.
    # Trust the settings from the .hg/hgrc files by default.
    def config(self, section, name, default=None, untrusted=True):
        """Look up a string config value on the repository's ui."""
        return self.repo.ui.config(section, name, default,
                                   untrusted=untrusted)

    def configbool(self, section, name, default=False, untrusted=True):
        """Look up a boolean config value on the repository's ui."""
        return self.repo.ui.configbool(section, name, default,
                                       untrusted=untrusted)

    def configlist(self, section, name, default=None, untrusted=True):
        """Look up a list config value on the repository's ui."""
        return self.repo.ui.configlist(section, name, default,
                                       untrusted=untrusted)
59 59
    def refresh(self):
        """Reopen the repository and reread web settings when the
        on-disk repository has changed since the last request."""
        mtime = get_mtime(self.repo.root)
        if mtime != self.mtime:
            self.mtime = mtime
            self.repo = hg.repository(self.repo.ui, self.repo.root)
            self.maxchanges = int(self.config("web", "maxchanges", 10))
            self.stripecount = int(self.config("web", "stripes", 1))
            self.maxshortchanges = int(self.config("web", "maxshortchanges", 60))
            self.maxfiles = int(self.config("web", "maxfiles", 10))
            self.allowpull = self.configbool("web", "allowpull", True)
            self.encoding = self.config("web", "encoding", util._encoding)
71 71
    def run(self):
        """Serve a single request when running as a CGI script."""
        if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
            raise RuntimeError("This function is only intended to be called while running as a CGI script.")
        import mercurial.hgweb.wsgicgi as wsgicgi
        wsgicgi.launch(self)
77 77
    def __call__(self, env, respond):
        # standard WSGI application entry point
        req = wsgirequest(env, respond)
        return self.run_wsgi(req)
81 81
    def run_wsgi(self, req):
        """Handle one WSGI request: wire-protocol commands go straight
        to the protocol module; everything else is mapped from the URL
        to a webcommands handler and rendered through the templater."""

        self.refresh()

        # process this if it's a protocol request
        # protocol bits don't need to create any URLs
        # and the clients always use the old URL structure

        cmd = req.form.get('cmd', [''])[0]
        if cmd and cmd in protocol.__all__:
            try:
                if cmd in perms:
                    try:
                        self.check_perm(req, perms[cmd])
                    except ErrorResponse, inst:
                        # drain the request body so the client can read
                        # the error instead of hitting a broken pipe
                        if cmd == 'unbundle':
                            req.drain()
                        raise
                method = getattr(protocol, cmd)
                return method(self.repo, req)
            except ErrorResponse, inst:
                req.respond(inst.code, protocol.HGTYPE)
                if not inst.message:
                    return []
                return '0\n%s\n' % inst.message,

        # work with CGI variables to create coherent structure
        # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME

        req.url = req.env['SCRIPT_NAME']
        if not req.url.endswith('/'):
            req.url += '/'
        if 'REPO_NAME' in req.env:
            req.url += req.env['REPO_NAME'] + '/'

        if 'PATH_INFO' in req.env:
            parts = req.env['PATH_INFO'].strip('/').split('/')
            repo_parts = req.env.get('REPO_NAME', '').split('/')
            if parts[:len(repo_parts)] == repo_parts:
                parts = parts[len(repo_parts):]
            query = '/'.join(parts)
        else:
            query = req.env['QUERY_STRING'].split('&', 1)[0]
            query = query.split(';', 1)[0]

        # translate user-visible url structure to internal structure

        args = query.split('/', 2)
        if 'cmd' not in req.form and args and args[0]:

            cmd = args.pop(0)
            # "style-command" URLs carry the style before the last dash
            style = cmd.rfind('-')
            if style != -1:
                req.form['style'] = [cmd[:style]]
                cmd = cmd[style+1:]

            # avoid accepting e.g. style parameter as command
            if hasattr(webcommands, cmd):
                req.form['cmd'] = [cmd]
            else:
                cmd = ''

            if cmd == 'static':
                req.form['file'] = ['/'.join(args)]
            else:
                if args and args[0]:
                    node = args.pop(0)
                    req.form['node'] = [node]
                if args:
                    req.form['file'] = args

            if cmd == 'archive':
                # strip the archive extension off the node and record
                # the requested archive type instead
                fn = req.form['node'][0]
                for type_, spec in self.archive_specs.iteritems():
                    ext = spec[2]
                    if fn.endswith(ext):
                        req.form['node'] = [fn[:-len(ext)]]
                        req.form['type'] = [type_]

        # process the web interface request

        try:
            tmpl = self.templater(req)
            ctype = tmpl('mimetype', encoding=self.encoding)
            ctype = templater.stringify(ctype)

            # check read permissions non-static content
            if cmd != 'static':
                self.check_perm(req, None)

            if cmd == '':
                req.form['cmd'] = [tmpl.cache['default']]
                cmd = req.form['cmd'][0]

            if cmd not in webcommands.__all__:
                msg = 'no such method: %s' % cmd
                raise ErrorResponse(HTTP_BAD_REQUEST, msg)
            elif cmd == 'file' and 'raw' in req.form.get('style', []):
                # rawfile issues its own respond() with the file's type
                self.ctype = ctype
                content = webcommands.rawfile(self, req, tmpl)
            else:
                content = getattr(webcommands, cmd)(self, req, tmpl)
                req.respond(HTTP_OK, ctype)

            return content

        except error.LookupError, err:
            req.respond(HTTP_NOT_FOUND, ctype)
            msg = str(err)
            if 'manifest' not in msg:
                msg = 'revision not found: %s' % err.name
            return tmpl('error', error=msg)
        except (RepoError, error.RevlogError), inst:
            req.respond(HTTP_SERVER_ERROR, ctype)
            return tmpl('error', error=str(inst))
        except ErrorResponse, inst:
            req.respond(inst.code, ctype)
            return tmpl('error', error=inst.message)
200 200
    def templater(self, req):
        """Build and return the templater for this request, preloading
        url/staticurl/urlbase defaults and the header/footer/motd
        generator functions."""

        # determine scheme, port and server name
        # this is needed to create absolute urls

        proto = req.env.get('wsgi.url_scheme')
        if proto == 'https':
            proto = 'https'
            default_port = "443"
        else:
            proto = 'http'
            default_port = "80"

        # only show the port in urls when it is non-default
        port = req.env["SERVER_PORT"]
        port = port != default_port and (":" + port) or ""
        urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port)
        staticurl = self.config("web", "staticurl") or req.url + 'static/'
        if not staticurl.endswith('/'):
            staticurl += '/'

        # some functions for the templater

        def header(**map):
            yield tmpl('header', encoding=self.encoding, **map)

        def footer(**map):
            yield tmpl("footer", **map)

        def motd(**map):
            yield self.config("web", "motd", "")

        # figure out which style to use

        vars = {}
        style = self.config("web", "style", "paper")
        if 'style' in req.form:
            # style requested in the URL overrides the configured one
            style = req.form['style'][0]
            vars['style'] = style

        start = req.url[-1] == '?' and '&' or '?'
        sessionvars = webutil.sessionvars(vars, start)
        mapfile = style_map(self.templatepath, style)

        if not self.reponame:
            self.reponame = (self.config("web", "name")
                             or req.env.get('REPO_NAME')
                             or req.url.strip('/') or self.repo.root)

        # create the templater

        tmpl = templater.templater(mapfile, templatefilters.filters,
                                   defaults={"url": req.url,
                                             "staticurl": staticurl,
                                             "urlbase": urlbase,
                                             "repo": self.reponame,
                                             "header": header,
                                             "footer": footer,
                                             "motd": motd,
                                             "sessionvars": sessionvars
                                             })
        return tmpl
262 262
    def archivelist(self, nodeid):
        """Yield a template dict for each archive type enabled via
        web.allow_archive or web.allow<type> for the given node."""
        allowed = self.configlist("web", "allow_archive")
        for i, spec in self.archive_specs.iteritems():
            if i in allowed or self.configbool("web", "allow" + i):
                yield {"type" : i, "extension" : spec[2], "node" : nodeid}
268 268
269 269 archive_specs = {
270 270 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
271 271 'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
272 272 'zip': ('application/zip', 'zip', '.zip', None),
273 273 }
274 274
275 275 def check_perm(self, req, op):
276 276 '''Check permission for operation based on request data (including
277 277 authentication info). Return if op allowed, else raise an ErrorResponse
278 278 exception.'''
279 279
280 280 user = req.env.get('REMOTE_USER')
281 281
282 282 deny_read = self.configlist('web', 'deny_read')
283 283 if deny_read and (not user or deny_read == ['*'] or user in deny_read):
284 284 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
285 285
286 286 allow_read = self.configlist('web', 'allow_read')
287 287 result = (not allow_read) or (allow_read == ['*'])
288 288 if not result or user in allow_read:
289 289 raise ErrorResponse(HTTP_UNAUTHORIZED, 'read not authorized')
290 290
291 291 if op == 'pull' and not self.allowpull:
292 292 raise ErrorResponse(HTTP_UNAUTHORIZED, 'pull not authorized')
293 293 elif op == 'pull' or op is None: # op is None for interface requests
294 294 return
295 295
296 296 # enforce that you can only push using POST requests
297 297 if req.env['REQUEST_METHOD'] != 'POST':
298 298 msg = 'push requires POST request'
299 299 raise ErrorResponse(HTTP_METHOD_NOT_ALLOWED, msg)
300 300
301 301 # require ssl by default for pushing, auth info cannot be sniffed
302 302 # and replayed
303 303 scheme = req.env.get('wsgi.url_scheme')
304 304 if self.configbool('web', 'push_ssl', True) and scheme != 'https':
305 305 raise ErrorResponse(HTTP_OK, 'ssl required')
306 306
307 307 deny = self.configlist('web', 'deny_push')
308 308 if deny and (not user or deny == ['*'] or user in deny):
309 309 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
310 310
311 311 allow = self.configlist('web', 'allow_push')
312 312 result = allow and (allow == ['*'] or user in allow)
313 313 if not result:
314 314 raise ErrorResponse(HTTP_UNAUTHORIZED, 'push not authorized')
@@ -1,664 +1,664 b''
1 1 #
2 2 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, mimetypes, re, cgi, copy
9 9 import webutil
10 from mercurial import revlog, archival, templatefilters
10 from mercurial import error, archival, templatefilters
11 11 from mercurial.node import short, hex, nullid
12 12 from mercurial.util import binary, datestr
13 13 from mercurial.repo import RepoError
14 14 from common import paritygen, staticfile, get_contact, ErrorResponse
15 15 from common import HTTP_OK, HTTP_FORBIDDEN, HTTP_NOT_FOUND
16 16 from mercurial import graphmod, util
17 17
18 18 # __all__ is populated with the allowed commands. Be sure to add to it if
19 19 # you're adding a new command, or the new command won't work.
20 20
21 21 __all__ = [
22 22 'log', 'rawfile', 'file', 'changelog', 'shortlog', 'changeset', 'rev',
23 23 'manifest', 'tags', 'summary', 'filediff', 'diff', 'annotate', 'filelog',
24 24 'archive', 'static', 'graph',
25 25 ]
26 26
def log(web, req, tmpl):
    """Dispatch 'log': per-file history when a file is requested,
    otherwise the full changelog."""
    if 'file' in req.form and req.form['file'][0]:
        return filelog(web, req, tmpl)
    else:
        return changelog(web, req, tmpl)
32 32
def rawfile(web, req, tmpl):
    """Serve the raw contents of a file at a revision, guessing the
    MIME type from the path; falls back to the manifest view when no
    file path is given or the file cannot be found."""
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        content = manifest(web, req, tmpl)
        req.respond(HTTP_OK, web.ctype)
        return content

    try:
        fctx = webutil.filectx(web.repo, req)
    except error.LookupError, inst:
        # path may name a directory: try the manifest view, but surface
        # the original lookup error if that fails too
        try:
            content = manifest(web, req, tmpl)
            req.respond(HTTP_OK, web.ctype)
            return content
        except ErrorResponse:
            raise inst

    path = fctx.path()
    text = fctx.data()
    mt = mimetypes.guess_type(path)[0]
    if mt is None:
        mt = binary(text) and 'application/octet-stream' or 'text/plain'

    req.respond(HTTP_OK, mt, path, len(text))
    return [text]
58 58
def _filerevision(web, tmpl, fctx):
    """Render the 'filerevision' template for a file context; binary
    content is replaced by a (binary:<mimetype>) placeholder."""
    f = fctx.path()
    text = fctx.data()
    parity = paritygen(web.stripecount)

    if binary(text):
        mt = mimetypes.guess_type(f)[0] or 'application/octet-stream'
        text = '(binary:%s)' % mt

    def lines():
        # keepends=1 so each template line carries its own newline
        for lineno, t in enumerate(text.splitlines(1)):
            yield {"line": t,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1),
                   "parity": parity.next()}

    return tmpl("filerevision",
                file=f,
                path=webutil.up(f),
                text=lines(),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.siblings(fctx.parents()),
                child=webutil.siblings(fctx.children()),
                rename=webutil.renamelink(fctx),
                permissions=fctx.manifest().flags(f))
89 89
def file(web, req, tmpl):
    """Show a file revision; falls back to the manifest view when no
    path is given or the path is not a file at that revision."""
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    if not path:
        return manifest(web, req, tmpl)
    try:
        return _filerevision(web, tmpl, webutil.filectx(web.repo, req))
    except error.LookupError, inst:
        # path may be a directory; re-raise the original error if the
        # manifest view fails as well
        try:
            return manifest(web, req, tmpl)
        except ErrorResponse:
            raise inst
101 101
def _search(web, tmpl, query):
    """Render the 'search' template: changesets whose user, description
    or file list contains every whitespace-separated word of query."""

    def changelist(**map):
        cl = web.repo.changelog
        count = 0
        qw = query.lower().split()

        def revgen():
            # walk history newest-first in batches of 100 revisions
            for i in xrange(len(cl) - 1, 0, -100):
                l = []
                for j in xrange(max(0, i - 100), i + 1):
                    ctx = web.repo[j]
                    l.append(ctx)
                l.reverse()
                for e in l:
                    yield e

        for ctx in revgen():
            # every query word must match somewhere in the changeset
            miss = 0
            for q in qw:
                if not (q in ctx.user().lower() or
                        q in ctx.description().lower() or
                        q in " ".join(ctx.files()).lower()):
                    miss = 1
                    break
            if miss:
                continue

            count += 1
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            yield tmpl('searchentry',
                       parity=parity.next(),
                       author=ctx.user(),
                       parent=webutil.siblings(ctx.parents()),
                       child=webutil.siblings(ctx.children()),
                       changelogtag=showtags,
                       desc=ctx.description(),
                       date=ctx.date(),
                       files=files,
                       rev=ctx.rev(),
                       node=hex(n),
                       tags=webutil.nodetagsdict(web.repo, n),
                       inbranch=webutil.nodeinbranch(web.repo, ctx),
                       branches=webutil.nodebranchdict(web.repo, ctx))

            if count >= web.maxchanges:
                break

    cl = web.repo.changelog
    parity = paritygen(web.stripecount)

    return tmpl('search',
                query=query,
                node=hex(cl.tip()),
                entries=changelist,
                archives=web.archivelist("tip"))
161 161
def changelog(web, req, tmpl, shortlog = False):
    """Render the (short)log template around the requested revision;
    an unresolvable 'rev' value is treated as a search query."""
    if 'node' in req.form:
        ctx = webutil.changectx(web.repo, req)
    else:
        if 'rev' in req.form:
            hi = req.form['rev'][0]
        else:
            hi = len(web.repo) - 1
        try:
            ctx = web.repo[hi]
        except RepoError:
            return _search(web, tmpl, hi) # XXX redirect to 404 page?

    # NOTE: closes over start/end/parity which are assigned below,
    # before the templater actually iterates this generator
    def changelist(limit=0, **map):
        cl = web.repo.changelog
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n)
            files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles)

            l.insert(0, {"parity": parity.next(),
                         "author": ctx.user(),
                         "parent": webutil.siblings(ctx.parents(), i - 1),
                         "child": webutil.siblings(ctx.children(), i + 1),
                         "changelogtag": showtags,
                         "desc": ctx.description(),
                         "date": ctx.date(),
                         "files": files,
                         "rev": i,
                         "node": hex(n),
                         "tags": webutil.nodetagsdict(web.repo, n),
                         "inbranch": webutil.nodeinbranch(web.repo, ctx),
                         "branches": webutil.nodebranchdict(web.repo, ctx)
                         })

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    maxchanges = shortlog and web.maxshortchanges or web.maxchanges
    cl = web.repo.changelog
    count = len(cl)
    pos = ctx.rev()
    start = max(0, pos - maxchanges + 1)
    end = min(count, start + maxchanges)
    pos = end - 1
    parity = paritygen(web.stripecount, offset=start-end)

    changenav = webutil.revnavgen(pos, maxchanges, count, web.repo.changectx)

    return tmpl(shortlog and 'shortlog' or 'changelog',
                changenav=changenav,
                node=hex(ctx.node()),
                rev=pos, changesets=count,
                entries=lambda **x: changelist(limit=0,**x),
                latestentry=lambda **x: changelist(limit=1,**x),
                archives=web.archivelist("tip"))
223 223
def shortlog(web, req, tmpl):
    """hgweb command: abbreviated changelog; delegates to changelog()."""
    return changelog(web, req, tmpl, shortlog=True)
226 226
def changeset(web, req, tmpl):
    """hgweb command: render a single changeset page with its full diff."""
    ctx = webutil.changectx(web.repo, req)
    showtags = webutil.showtag(web.repo, tmpl, 'changesettag', ctx.node())
    showbranch = webutil.nodebranchnodefault(ctx)
    parents = ctx.parents()

    # one parity stream for the file list...
    parity = paritygen(web.stripecount)
    files = []
    for f in ctx.files():
        if f in ctx:
            template = 'filenodelink'
        else:
            template = 'filenolink'
        files.append(tmpl(template,
                          node=ctx.hex(), file=f,
                          parity=parity.next()))

    # ...and a fresh one for the diff hunks
    parity = paritygen(web.stripecount)
    diffs = webutil.diffs(web.repo, tmpl, ctx, None, parity)
    return tmpl('changeset',
                diff=diffs,
                rev=ctx.rev(),
                node=ctx.hex(),
                parent=webutil.siblings(parents),
                child=webutil.siblings(ctx.children()),
                changesettag=showtags,
                changesetbranch=showbranch,
                author=ctx.user(),
                desc=ctx.description(),
                date=ctx.date(),
                files=files,
                archives=web.archivelist(ctx.hex()),
                tags=webutil.nodetagsdict(web.repo, ctx.node()),
                branch=webutil.nodebranchnodefault(ctx),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))

rev = changeset
262 262
def manifest(web, req, tmpl):
    """hgweb command: render a directory listing for a path at a revision.

    Builds two views of the manifest restricted to `path`: direct files
    and subdirectories (the latter collapsed while they contain a single
    child, so empty intermediate directories display as one entry).
    """
    ctx = webutil.changectx(web.repo, req)
    path = webutil.cleanpath(web.repo, req.form.get('file', [''])[0])
    mf = ctx.manifest()
    node = ctx.node()

    files = {}
    dirs = {}
    parity = paritygen(web.stripecount)

    if path and path[-1] != "/":
        path += "/"
    l = len(path)
    abspath = "/" + path

    for f, n in mf.iteritems():
        if f[:l] != path:
            continue
        remain = f[l:]
        elements = remain.split('/')
        if len(elements) == 1:
            files[remain] = f
        else:
            h = dirs # need to retain ref to dirs (root)
            for elem in elements[0:-1]:
                if elem not in h:
                    h[elem] = {}
                h = h[elem]
                # stop descending once a directory has siblings
                if len(h) > 1:
                    break
            h[None] = None # denotes files present

    if mf and not files and not dirs:
        raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path)

    def filelist(**map):
        for f in util.sort(files):
            full = files[f]

            fctx = ctx.filectx(full)
            yield {"file": full,
                   "parity": parity.next(),
                   "basename": f,
                   "date": fctx.date(),
                   "size": fctx.size(),
                   "permissions": mf.flags(full)}

    def dirlist(**map):
        for d in util.sort(dirs):

            # walk single-child chains to display "a/b/c" as one entry
            emptydirs = []
            h = dirs[d]
            while isinstance(h, dict) and len(h) == 1:
                k,v = h.items()[0]
                if v:
                    emptydirs.append(k)
                h = v

            path = "%s%s" % (abspath, d)
            yield {"parity": parity.next(),
                   "path": path,
                   "emptydirs": "/".join(emptydirs),
                   "basename": d}

    return tmpl("manifest",
                rev=ctx.rev(),
                node=hex(node),
                path=abspath,
                up=webutil.up(abspath),
                upparity=parity.next(),
                fentries=filelist,
                dentries=dirlist,
                archives=web.archivelist(hex(node)),
                tags=webutil.nodetagsdict(web.repo, node),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx))
339 339
def tags(web, req, tmpl):
    """hgweb command: render the tags page (newest-tagged revision first)."""
    taglist = web.repo.tagslist()
    taglist.reverse()
    parity = paritygen(web.stripecount)

    def entries(notip=False,limit=0, **map):
        emitted = 0
        for name, node in taglist:
            if notip and name == "tip":
                continue
            # once the limit is reached, skip the remaining entries
            if limit > 0 and emitted >= limit:
                continue
            emitted += 1
            yield {"parity": parity.next(),
                   "tag": name,
                   "date": web.repo[node].date(),
                   "node": hex(node)}

    return tmpl("tags",
                node=hex(web.repo.changelog.tip()),
                entries=lambda **x: entries(False,0, **x),
                entriesnotip=lambda **x: entries(True,0, **x),
                latestentry=lambda **x: entries(True,1, **x))
363 363
def summary(web, req, tmpl):
    """hgweb command: render the repository summary page.

    Shows up to 10 recent tags, all named branches, and the most recent
    changesets (generators are consumed lazily by the template).
    """
    i = web.repo.tagslist()
    i.reverse()

    def tagentries(**map):
        parity = paritygen(web.stripecount)
        count = 0
        for k, n in i:
            if k == "tip": # skip tip
                continue

            count += 1
            if count > 10: # limit to 10 tags
                break

            yield tmpl("tagentry",
                       parity=parity.next(),
                       tag=k,
                       node=hex(n),
                       date=web.repo[n].date())

    def branches(**map):
        parity = paritygen(web.stripecount)

        # sort branch heads newest-revision-first via the negated rev
        b = web.repo.branchtags()
        l = [(-web.repo.changelog.rev(n), n, t) for t, n in b.iteritems()]
        for r,n,t in util.sort(l):
            yield {'parity': parity.next(),
                   'branch': t,
                   'node': hex(n),
                   'date': web.repo[n].date()}

    # NOTE: closure over start/end assigned below, before the template runs
    def changelist(**map):
        parity = paritygen(web.stripecount, offset=start-end)
        l = [] # build a list in forward order for efficiency
        for i in xrange(start, end):
            ctx = web.repo[i]
            n = ctx.node()
            hn = hex(n)

            l.insert(0, tmpl(
               'shortlogentry',
                parity=parity.next(),
                author=ctx.user(),
                desc=ctx.description(),
                date=ctx.date(),
                rev=i,
                node=hn,
                tags=webutil.nodetagsdict(web.repo, n),
                inbranch=webutil.nodeinbranch(web.repo, ctx),
                branches=webutil.nodebranchdict(web.repo, ctx)))

        yield l

    cl = web.repo.changelog
    count = len(cl)
    start = max(0, count - web.maxchanges)
    end = min(count, start + web.maxchanges)

    return tmpl("summary",
                desc=web.config("web", "description", "unknown"),
                owner=get_contact(web.config) or "unknown",
                lastchange=cl.read(cl.tip())[2],
                tags=tagentries,
                branches=branches,
                shortlog=changelist,
                node=hex(cl.tip()),
                archives=web.archivelist("tip"))
432 432
def filediff(web, req, tmpl):
    """hgweb command: render the diff for one file in a changeset.

    If the file revision cannot be looked up (e.g. the file was removed
    by this changeset), fall back to the changeset context, but only
    when the changeset actually touches the file; otherwise re-raise.
    """
    fctx, ctx = None, None
    try:
        fctx = webutil.filectx(web.repo, req)
    except LookupError:
        ctx = webutil.changectx(web.repo, req)
        path = webutil.cleanpath(web.repo, req.form['file'][0])
        if path not in ctx.files():
            raise

    if fctx is not None:
        n = fctx.node()
        path = fctx.path()
        parents = fctx.parents()
    else:
        n = ctx.node()
        # path already defined in except clause
        parents = ctx.parents()
    # (a previously computed "p1" local was never used and has been removed)

    parity = paritygen(web.stripecount)
    diffs = webutil.diffs(web.repo, tmpl, fctx or ctx, [path], parity)
    rename = fctx and webutil.renamelink(fctx) or []
    ctx = fctx and fctx or ctx
    return tmpl("filediff",
                file=path,
                node=hex(n),
                rev=ctx.rev(),
                date=ctx.date(),
                desc=ctx.description(),
                author=ctx.user(),
                rename=rename,
                branch=webutil.nodebranchnodefault(ctx),
                parent=webutil.siblings(parents),
                child=webutil.siblings(ctx.children()),
                diff=diffs)

diff = filediff
471 471
def annotate(web, req, tmpl):
    """hgweb command: render the per-line annotation (blame) view of a file."""
    fctx = webutil.filectx(web.repo, req)
    f = fctx.path()
    parity = paritygen(web.stripecount)

    def annotate(**map):
        last = None
        if binary(fctx.data()):
            # binary files get a single synthetic "(binary:<mime>)" line
            mt = (mimetypes.guess_type(fctx.path())[0]
                  or 'application/octet-stream')
            lines = enumerate([((fctx.filectx(fctx.filerev()), 1),
                                '(binary:%s)' % mt)])
        else:
            lines = enumerate(fctx.annotate(follow=True, linenumber=True))
        for lineno, ((f, targetline), l) in lines:
            fnode = f.filenode()

            # track the originating filenode of the previous line
            if last != fnode:
                last = fnode

            yield {"parity": parity.next(),
                   "node": hex(f.node()),
                   "rev": f.rev(),
                   "author": f.user(),
                   "desc": f.description(),
                   "file": f.path(),
                   "targetline": targetline,
                   "line": l,
                   "lineid": "l%d" % (lineno + 1),
                   "linenumber": "% 6d" % (lineno + 1)}

    return tmpl("fileannotate",
                file=f,
                annotate=annotate,
                path=webutil.up(f),
                rev=fctx.rev(),
                node=hex(fctx.node()),
                author=fctx.user(),
                date=fctx.date(),
                desc=fctx.description(),
                rename=webutil.renamelink(fctx),
                branch=webutil.nodebranchnodefault(fctx),
                parent=webutil.siblings(fctx.parents()),
                child=webutil.siblings(fctx.children()),
                permissions=fctx.manifest().flags(f))
517 517
def filelog(web, req, tmpl):
    """hgweb command: render the revision history of a single file.

    Resolves the diff in SOURCE to the post-commit state: revlog errors
    now live in mercurial.error, so the fallback catches
    error.LookupError (the old revlog.LookupError alias).
    """
    try:
        fctx = webutil.filectx(web.repo, req)
        f = fctx.path()
        fl = fctx.filelog()
    except error.LookupError:
        # the file may not exist at the requested changeset: walk back
        # to the newest file revision linked at or before it
        f = webutil.cleanpath(web.repo, req.form['file'][0])
        fl = web.repo.file(f)
        numrevs = len(fl)
        if not numrevs: # file doesn't exist at all
            raise
        rev = webutil.changectx(web.repo, req).rev()
        first = fl.linkrev(0)
        if rev < first: # current rev is from before file existed
            raise
        frev = numrevs - 1
        while fl.linkrev(frev) > rev:
            frev -= 1
        fctx = web.repo.filectx(f, fl.linkrev(frev))

    count = fctx.filerev() + 1
    pagelen = web.maxshortchanges
    start = max(0, fctx.filerev() - pagelen + 1) # first rev on this page
    end = min(count, start + pagelen) # last rev on this page
    parity = paritygen(web.stripecount, offset=start-end)

    def entries(limit=0, **map):
        l = []

        repo = web.repo
        for i in xrange(start, end):
            iterfctx = fctx.filectx(i)

            l.insert(0, {"parity": parity.next(),
                         "filerev": i,
                         "file": f,
                         "node": hex(iterfctx.node()),
                         "author": iterfctx.user(),
                         "date": iterfctx.date(),
                         "rename": webutil.renamelink(iterfctx),
                         "parent": webutil.siblings(iterfctx.parents()),
                         "child": webutil.siblings(iterfctx.children()),
                         "desc": iterfctx.description(),
                         "tags": webutil.nodetagsdict(repo, iterfctx.node()),
                         "branch": webutil.nodebranchnodefault(iterfctx),
                         "inbranch": webutil.nodeinbranch(repo, iterfctx),
                         "branches": webutil.nodebranchdict(repo, iterfctx)})

        if limit > 0:
            l = l[:limit]

        for e in l:
            yield e

    nodefunc = lambda x: fctx.filectx(fileid=x)
    nav = webutil.revnavgen(end - 1, pagelen, count, nodefunc)
    return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav,
                entries=lambda **x: entries(limit=0, **x),
                latestentry=lambda **x: entries(limit=1, **x))
578 578
579 579
def archive(web, req, tmpl):
    """hgweb command: stream an archive (tarball/zip) of a revision."""
    req_type = req.form.get('type', [None])[0]
    allowed = web.configlist("web", "allow_archive")
    key = req.form['node'][0]

    # reject unknown archive types outright
    if req_type not in web.archives:
        msg = 'Unsupported archive type: %s' % req_type
        raise ErrorResponse(HTTP_NOT_FOUND, msg)

    permitted = (req_type in allowed or
                 web.configbool("web", "allow" + req_type, False))
    if not permitted:
        msg = 'Archive type not allowed: %s' % req_type
        raise ErrorResponse(HTTP_FORBIDDEN, msg)

    reponame = re.sub(r"\W+", "-", os.path.basename(web.reponame))
    cnode = web.repo.lookup(key)
    arch_version = key
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)
    mimetype, artype, extension, encoding = web.archive_specs[req_type]
    headers = [
        ('Content-Type', mimetype),
        ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension))
    ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.header(headers)
    req.respond(HTTP_OK)
    archival.archive(web.repo, req, cnode, artype, prefix=name)
    return []
611 611
612 612
def static(web, req, tmpl):
    """hgweb command: serve a file from the static asset directories."""
    fname = req.form['file'][0]
    # a repo owner may set web.static in .hg/hgrc to serve any file
    # readable by the user running the CGI script
    dirs = web.config("web", "static", None, untrusted=False)
    if not dirs:
        tp = web.templatepath
        if isinstance(tp, str):
            tp = [tp]
        dirs = [os.path.join(p, 'static') for p in tp]
    return [staticfile(dirs, fname, req)]
624 624
def graph(web, req, tmpl):
    """hgweb command: render the DAG graph page.

    Collects (node, vertex, edges, desc, user, age, branch, tags) tuples
    for the template's JavaScript renderer; 'revcount' from the query
    string controls the window size and is persisted in session vars.
    """
    rev = webutil.changectx(web.repo, req).rev()
    bg_height = 39

    revcount = 25
    if 'revcount' in req.form:
        revcount = int(req.form.get('revcount', [revcount])[0])
        tmpl.defaults['sessionvars']['revcount'] = revcount

    # links for halving/doubling the displayed window
    lessvars = copy.copy(tmpl.defaults['sessionvars'])
    lessvars['revcount'] = revcount / 2
    morevars = copy.copy(tmpl.defaults['sessionvars'])
    morevars['revcount'] = revcount * 2

    max_rev = len(web.repo) - 1
    revcount = min(max_rev, revcount)
    revnode = web.repo.changelog.node(rev)
    revnode_hex = hex(revnode)
    uprev = min(max_rev, rev + revcount)
    downrev = max(0, rev - revcount)
    count = len(web.repo)
    changenav = webutil.revnavgen(rev, revcount, count, web.repo.changectx)

    tree = list(graphmod.graph(web.repo, rev, downrev))
    canvasheight = (len(tree) + 1) * bg_height - 27;
    data = []
    for i, (ctx, vtx, edges) in enumerate(tree):
        node = short(ctx.node())
        age = templatefilters.age(ctx.date())
        desc = templatefilters.firstline(ctx.description())
        desc = cgi.escape(desc)
        user = cgi.escape(templatefilters.person(ctx.user()))
        branch = ctx.branch()
        # pair the branch name with whether this node is its tip
        branch = branch, web.repo.branchtags().get(branch) == ctx.node()
        data.append((node, vtx, edges, desc, user, age, branch, ctx.tags()))

    return tmpl('graph', rev=rev, revcount=revcount, uprev=uprev,
                lessvars=lessvars, morevars=morevars, downrev=downrev,
                canvasheight=canvasheight, jsdata=data, bg_height=bg_height,
                node=revnode_hex, changenav=changenav)
@@ -1,2150 +1,2150 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui, store
13 import os, revlog, time, util, extensions, hook, inspect
13 import os, time, util, extensions, hook, inspect, error
14 14 import match as match_
15 15 import merge as merge_
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 19 supported = ('revlogv1', 'store', 'fncache')
20 20
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        On create, lays out .hg (optionally with store/fncache per the
        'format' config section) and writes the 'requires' file; on open,
        validates the existing requirements against self.supported.
        Raises repo.RepoError for missing/existing/unsupported repos.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise repo.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                # a missing requires file is fine (old-style repo)
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # ignore a missing or unreadable hgrc
            pass

        # lazily populated caches; None/empty means "not computed yet"
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
87 87
    def __getattr__(self, name):
        """Lazily create the changelog, manifest and dirstate on first use.

        The created object is assigned onto the instance, so this hook
        only runs once per attribute.
        """
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch self.changelog first so sopener.defversion is set
            # before the manifest revlog is opened
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)
102 102
103 103 def __getitem__(self, changeid):
104 104 if changeid == None:
105 105 return context.workingctx(self)
106 106 return context.changectx(self, changeid)
107 107
    def __nonzero__(self):
        """A repository object is always truthy."""
        return True
110 110
    def __len__(self):
        """Number of revisions in the repository (changelog length)."""
        return len(self.changelog)
113 113
114 114 def __iter__(self):
115 115 for i in xrange(len(self)):
116 116 yield i
117 117
    def url(self):
        """Return the file: URL of this repository."""
        return 'file:' + self.root
120 120
    def hook(self, name, throw=False, **args):
        """Run the named hook through the hook machinery (see hook.hook)."""
        return hook.hook(self.ui, self, name, throw, **args)
123 123
124 124 tag_disallowed = ':\r\n'
125 125
126 126 def _tag(self, names, node, message, local, user, date, parent=None,
127 127 extra={}):
128 128 use_dirstate = parent is None
129 129
130 130 if isinstance(names, str):
131 131 allchars = names
132 132 names = (names,)
133 133 else:
134 134 allchars = ''.join(names)
135 135 for c in self.tag_disallowed:
136 136 if c in allchars:
137 137 raise util.Abort(_('%r cannot be used in a tag name') % c)
138 138
139 139 for name in names:
140 140 self.hook('pretag', throw=True, node=hex(node), tag=name,
141 141 local=local)
142 142
143 143 def writetags(fp, names, munge, prevtags):
144 144 fp.seek(0, 2)
145 145 if prevtags and prevtags[-1] != '\n':
146 146 fp.write('\n')
147 147 for name in names:
148 148 m = munge and munge(name) or name
149 149 if self._tagstypecache and name in self._tagstypecache:
150 150 old = self.tagscache.get(name, nullid)
151 151 fp.write('%s %s\n' % (hex(old), m))
152 152 fp.write('%s %s\n' % (hex(node), m))
153 153 fp.close()
154 154
155 155 prevtags = ''
156 156 if local:
157 157 try:
158 158 fp = self.opener('localtags', 'r+')
159 159 except IOError, err:
160 160 fp = self.opener('localtags', 'a')
161 161 else:
162 162 prevtags = fp.read()
163 163
164 164 # local tags are stored in the current charset
165 165 writetags(fp, names, None, prevtags)
166 166 for name in names:
167 167 self.hook('tag', node=hex(node), tag=name, local=local)
168 168 return
169 169
170 170 if use_dirstate:
171 171 try:
172 172 fp = self.wfile('.hgtags', 'rb+')
173 173 except IOError, err:
174 174 fp = self.wfile('.hgtags', 'ab')
175 175 else:
176 176 prevtags = fp.read()
177 177 else:
178 178 try:
179 179 prevtags = self.filectx('.hgtags', parent).data()
180 except revlog.LookupError:
180 except error.LookupError:
181 181 pass
182 182 fp = self.wfile('.hgtags', 'wb')
183 183 if prevtags:
184 184 fp.write(prevtags)
185 185
186 186 # committed tags are stored in UTF-8
187 187 writetags(fp, names, util.fromlocal, prevtags)
188 188
189 189 if use_dirstate and '.hgtags' not in self.dirstate:
190 190 self.add(['.hgtags'])
191 191
192 192 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
193 193 extra=extra)
194 194
195 195 for name in names:
196 196 self.hook('tag', node=hex(node), tag=name, local=local)
197 197
198 198 return tagnode
199 199
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to tag while .hgtags itself has uncommitted changes
        # (first five status lists: modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
227 227
    def tags(self):
        '''return a mapping of tag to node'''
        # serve from cache when already computed
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse "node tag" lines from fn and merge them into
            # globaltags/tagtypes, warning (not failing) on bad entries
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the superseded nodes for this tag
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        # 'tip' always exists and always wins
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
311 311
312 312 def tagtype(self, tagname):
313 313 '''
314 314 return the type of the given tag. result can be:
315 315
316 316 'local' : a local tag
317 317 'global' : a global tag
318 318 None : tag does not exist
319 319 '''
320 320
321 321 self.tags()
322 322
323 323 return self._tagstypecache.get(tagname)
324 324
325 325 def _hgtagsnodes(self):
326 326 heads = self.heads()
327 327 heads.reverse()
328 328 last = {}
329 329 ret = []
330 330 for node in heads:
331 331 c = self[node]
332 332 rev = c.rev()
333 333 try:
334 334 fnode = c.filenode('.hgtags')
335 except revlog.LookupError:
335 except error.LookupError:
336 336 continue
337 337 ret.append((rev, node, fnode))
338 338 if fnode in last:
339 339 ret[last[fnode]] = None
340 340 last[fnode] = len(ret) - 1
341 341 return [item for item in ret if item]
342 342
343 343 def tagslist(self):
344 344 '''return a list of tags ordered by revision'''
345 345 l = []
346 346 for t, n in self.tags().iteritems():
347 347 try:
348 348 r = self.changelog.rev(n)
349 349 except:
350 350 r = -2 # sort to the beginning of the list if unknown
351 351 l.append((r, t, n))
352 352 return [(t, n) for r, t, n in util.sort(l)]
353 353
354 354 def nodetags(self, node):
355 355 '''return the tags associated with a node'''
356 356 if not self.nodetagscache:
357 357 self.nodetagscache = {}
358 358 for t, n in self.tags().iteritems():
359 359 self.nodetagscache.setdefault(n, []).append(t)
360 360 return self.nodetagscache.get(node, [])
361 361
    def _branchtags(self, partial, lrev):
        """Bring the branch-head map `partial` (valid through lrev) up to
        tip, persisting the refreshed cache; returns `partial`."""
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
369 369
    def branchtags(self):
        """Return a {branchname: tipmost node} map, keys in the local
        charset, refreshing the on-disk branch cache when the tip moved.
        """
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # cache unusable (first call, or old tip stripped): reread disk
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache
395 395
    def _readbranchcache(self):
        """Parse the on-disk branch cache file.

        Returns (partial_map, tipnode, tiprev); any read/parse problem,
        or a recorded tip that no longer matches, yields an empty map
        with nullid/nullrev so callers rebuild from scratch.
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was valid for
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # treat any other problem as a cache miss, optionally reporting
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
422 422
423 423 def _writebranchcache(self, branches, tip, tiprev):
424 424 try:
425 425 f = self.opener("branch.cache", "w", atomictemp=True)
426 426 f.write("%s %s\n" % (hex(tip), tiprev))
427 427 for label, node in branches.iteritems():
428 428 f.write("%s %s\n" % (hex(node), label))
429 429 f.rename()
430 430 except (IOError, OSError):
431 431 pass
432 432
433 433 def _updatebranchcache(self, partial, start, end):
434 434 for r in xrange(start, end):
435 435 c = self[r]
436 436 b = c.branch()
437 437 partial[b] = c.node()
438 438
    def lookup(self, key):
        """Resolve key to a binary changelog node.

        key may be an integer rev, '.', 'null', 'tip', a full node,
        a tag, a branch name, or an unambiguous node prefix — tried in
        that order, so the order here is part of the user-visible
        resolution semantics.  Raises repo.RepoError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # show binary nodes as hex in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
464 464
    def local(self):
        """True: this repository is local (direct filesystem access)."""
        return True
467 467
    def join(self, f):
        """Return f joined onto the .hg directory path."""
        return os.path.join(self.path, f)
470 470
    def wjoin(self, f):
        """Return f joined onto the working directory root."""
        return os.path.join(self.root, f)
473 473
    def rjoin(self, f):
        """Return f joined onto the root after util.pconvert.
        NOTE(review): pconvert presumably normalizes path separators —
        confirm against mercurial.util."""
        return os.path.join(self.root, util.pconvert(f))
476 476
477 477 def file(self, f):
478 478 if f[0] == '/':
479 479 f = f[1:]
480 480 return filelog.filelog(self.sopener, f)
481 481
    def changectx(self, changeid):
        """Return the change context for changeid (same as repo[changeid])."""
        return self[changeid]
484 484
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
488 488
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
493 493
    def getcwd(self):
        """Return the current working directory as seen by the dirstate."""
        return self.dirstate.getcwd()
496 496
497 497 def pathto(self, f, cwd=None):
498 498 return self.dirstate.pathto(f, cwd)
499 499
500 500 def wfile(self, f, mode='r'):
501 501 return self.wopener(f, mode)
502 502
503 503 def _link(self, f):
504 504 return os.path.islink(self.wjoin(f))
505 505
    def _filter(self, filter, filename, data):
        """Run *data* for *filename* through the configured filter chain.

        *filter* names a config section ("encode" or "decode" at the call
        sites); the first configured pattern matching *filename* is
        applied.  Compiled (matcher, fn, params) lists are memoized in
        self.filterpats.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                # '!' disables filtering for this pattern
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # prefer a registered data filter whose name prefixes cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to util.filter with the raw command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
536 536
    def adddatafilter(self, name, filter):
        """Register data filter function *filter* under *name* for use by
        _filter() (selected when a configured command starts with *name*)."""
        self._datafilters[name] = filter
539 539
    def wread(self, filename):
        """Read *filename* from the working directory and apply the
        "encode" filters.  For a symlink, the link target is the data."""
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
546 546
    def wwrite(self, filename, data, flags):
        """Write *data* to working-directory file *filename*, applying the
        "decode" filters first.

        flags: 'l' writes a symlink (data is the target), 'x' sets the
        executable bit.
        """
        data = self._filter("decode", filename, data)
        try:
            # remove any existing file so a type change (file<->link) applies
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
559 559
560 560 def wwritedata(self, filename, data):
561 561 return self._filter("decode", filename, data)
562 562
    def transaction(self):
        """Return a new store transaction, nesting inside a live one if any.

        Saves the dirstate and branch for later rollback, and aborts if a
        journal from an interrupted transaction already exists.
        """
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # journal.* -> undo.* renames, handed to aftertrans (presumably
        # applied when the transaction completes -- see aftertrans)
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # weakref: don't keep the transaction alive beyond its users
        self._transref = weakref.ref(tr)
        return tr
588 588
    def recover(self):
        """Roll back an interrupted transaction using the journal file.

        Returns True if a journal was found and rolled back, else False.
        """
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
602 602
    def rollback(self):
        """Undo the last transaction, restoring dirstate and branch from
        the undo.* files written by transaction(); warns and does nothing
        if no rollback information is present."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
625 625
626 626 def invalidate(self):
627 627 for a in "changelog manifest".split():
628 628 if a in self.__dict__:
629 629 delattr(self, a)
630 630 self.tagscache = None
631 631 self._tagstypecache = None
632 632 self.nodetagscache = None
633 633 self.branchcache = None
634 634 self._ubranchcache = None
635 635 self._branchcachetip = None
636 636
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lock file *lockname*, optionally waiting for the holder.

        With wait, retries for ui.timeout seconds (default 600) after
        warning who holds the lock; without wait, LockHeld propagates.
        Calls acquirefn() once the lock is held and returns the lock.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
651 651
    def lock(self, wait=True):
        """Acquire (or reuse, via weakref) the repository store lock.

        Caches are invalidated on acquisition so fresh data is read.
        """
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
660 660
    def wlock(self, wait=True):
        """Acquire (or reuse, via weakref) the working directory lock.

        The dirstate is invalidated on acquisition and written on release.
        """
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
670 670
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the node of the file revision to record in the manifest:
        either a newly added filelog revision (in which case the file name
        is appended to changelist) or an existing parent revision when the
        file is unmodified.  Rename/copy metadata is recorded here.
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cf = cp[0]
            cr = manifest1.get(cf)
            nfp = fp2

            if manifest2: # branch merge
                if fp2 == nullid: # copied on remote side
                    if fp1 != nullid or cf in manifest2:
                        cr = manifest2[cf]
                        nfp = fp1

            # find source in nearest ancestor if we've lost track
            if not cr:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fn, cf))
                for a in self['.'].ancestors():
                    if cf in a:
                        cr = a[cf].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
            meta["copy"] = cf
            meta["copyrev"] = hex(cr)
            fp1, fp2 = nullid, nfp
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
741 741
742 742 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
743 743 if p1 is None:
744 744 p1, p2 = self.dirstate.parents()
745 745 return self.commit(files=files, text=text, user=user, date=date,
746 746 p1=p1, p2=p2, extra=extra, empty_ok=True)
747 747
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit working directory changes (or an explicit file list) and
        return the new changeset node (None when nothing changed).

        When p1 is given (the rawcommit path), the dirstate status checks
        are bypassed and *files* is taken as-is.  Aborts on a partial
        commit of a merge or on unresolved merge conflicts.
        """
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the explicit file list by dirstate status
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid # binds as (p1, (p2 or nullid))
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
797 797
    def commitctx(self, ctx):
        """Add a new revision to current repository.

        Revision information is passed in the context.memctx argument.
        commitctx() does not touch the working directory.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            # memctx callers fully specify the changeset, so skip all
            # dirstate-based checks and updates
            return self._commitctx(ctx, force=True, force_editor=False,
                                   empty_ok=True, use_dirstate=False,
                                   update_dirstate=False)
        finally:
            del lock, wlock
813 813
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Shared back end for commit() and commitctx(): write the file
        revisions, the manifest and the changelog entry for *wctx*.

        Returns the new changeset node, or None when nothing changed.
        Callers must already hold wlock and lock.
        """
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in util.sort(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in util.sort(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed1))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append("") # Empty line between message and comments.
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: added %s" % f for f in added])
                edittext.extend(["HG: changed %s" % f for f in updated])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not added and not updated and not removed:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # normalize the message: strip trailing whitespace and
            # leading blank lines
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
951 951
952 952 def walk(self, match, node=None):
953 953 '''
954 954 walk recursively through the directory tree or a given
955 955 changeset, finding all files matched by the match
956 956 function
957 957 '''
958 958 return self[node].walk(match)
959 959
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # warn only about files absent from the base revision
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: skip the fixup if the wlock is
                            # unavailable rather than blocking a status call
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1065 1065
1066 1066 def add(self, list):
1067 1067 wlock = self.wlock()
1068 1068 try:
1069 1069 rejected = []
1070 1070 for f in list:
1071 1071 p = self.wjoin(f)
1072 1072 try:
1073 1073 st = os.lstat(p)
1074 1074 except:
1075 1075 self.ui.warn(_("%s does not exist!\n") % f)
1076 1076 rejected.append(f)
1077 1077 continue
1078 1078 if st.st_size > 10000000:
1079 1079 self.ui.warn(_("%s: files over 10MB may cause memory and"
1080 1080 " performance problems\n"
1081 1081 "(use 'hg revert %s' to unadd the file)\n")
1082 1082 % (f, f))
1083 1083 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1084 1084 self.ui.warn(_("%s not added: only files and symlinks "
1085 1085 "supported currently\n") % f)
1086 1086 rejected.append(p)
1087 1087 elif self.dirstate[f] in 'amn':
1088 1088 self.ui.warn(_("%s already tracked!\n") % f)
1089 1089 elif self.dirstate[f] == 'r':
1090 1090 self.dirstate.normallookup(f)
1091 1091 else:
1092 1092 self.dirstate.add(f)
1093 1093 return rejected
1094 1094 finally:
1095 1095 del wlock
1096 1096
1097 1097 def forget(self, list):
1098 1098 wlock = self.wlock()
1099 1099 try:
1100 1100 for f in list:
1101 1101 if self.dirstate[f] != 'a':
1102 1102 self.ui.warn(_("%s not added!\n") % f)
1103 1103 else:
1104 1104 self.dirstate.forget(f)
1105 1105 finally:
1106 1106 del wlock
1107 1107
    def remove(self, list, unlink=False):
        """Mark the files in *list* as removed in the dirstate.

        With unlink=True the files are also deleted from the working
        directory first (already-missing files are tolerated).
        """
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # a file that is already gone is fine
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the file is still present; warn rather than record
                    # a removal that did not happen
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1130 1130
    def undelete(self, list):
        """Restore files marked removed ('r') from a parent revision,
        rewriting their content and marking them normal again."""
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    # take the version from whichever parent has the file
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock
1147 1147
    def copy(self, source, dest):
        """Record that working-directory file *dest* is a copy of *source*.

        dest must already exist as a file or symlink; it is added to the
        dirstate first when not yet tracked (state '?' or 'r').
        """
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock
1164 1164
1165 1165 def heads(self, start=None):
1166 1166 heads = self.changelog.heads(start)
1167 1167 # sort the output in rev descending order
1168 1168 heads = [(-self.changelog.rev(h), h) for h in heads]
1169 1169 return [n for (r, n) in util.sort(heads)]
1170 1170
    def branchheads(self, branch=None, start=None):
        """Return the list of head nodes of *branch* (default: the working
        directory branch), optionally limited to descendants of *start*."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1218 1218
1219 1219 def branches(self, nodes):
1220 1220 if not nodes:
1221 1221 nodes = [self.changelog.tip()]
1222 1222 b = []
1223 1223 for n in nodes:
1224 1224 t = n
1225 1225 while 1:
1226 1226 p = self.changelog.parents(n)
1227 1227 if p[1] != nullid or p[0] == nullid:
1228 1228 b.append((t, n, p[0], p[1]))
1229 1229 break
1230 1230 n = p[0]
1231 1231 return b
1232 1232
1233 1233 def between(self, pairs):
1234 1234 r = []
1235 1235
1236 1236 for top, bottom in pairs:
1237 1237 n, l, i = top, [], 0
1238 1238 f = 1
1239 1239
1240 1240 while n != bottom:
1241 1241 p = self.changelog.parents(n)[0]
1242 1242 if i == f:
1243 1243 l.append(n)
1244 1244 f = f * 2
1245 1245 n = p
1246 1246 i += 1
1247 1247
1248 1248 r.append(l)
1249 1249
1250 1250 return r
1251 1251
1252 1252 def findincoming(self, remote, base=None, heads=None, force=False):
1253 1253 """Return list of roots of the subsets of missing nodes from remote
1254 1254
1255 1255 If base dict is specified, assume that these nodes and their parents
1256 1256 exist on the remote side and that no child of a node of base exists
1257 1257 in both remote and self.
1258 1258 Furthermore base will be updated to include the nodes that exists
1259 1259 in self and remote but no children exists in self and remote.
1260 1260 If a list of heads is specified, return only nodes which are heads
1261 1261 or ancestors of these heads.
1262 1262
1263 1263 All the ancestors of base are in self and in remote.
1264 1264 All the descendants of the list returned are missing in self.
1265 1265 (and so we know that the rest of the nodes are missing in remote, see
1266 1266 outgoing)
1267 1267 """
1268 1268 return self.findcommonincoming(remote, base, heads, force)[1]
1269 1269
    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None: # NOTE(review): 'base is None' is the idiomatic test
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            # every remote head is already known locally
            return base.keys(), [], []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # query the remote in batches of 10 branch roots
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch[p] = 1
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), fetch.keys(), heads
1410 1410
1411 1411 def findoutgoing(self, remote, base=None, heads=None, force=False):
1412 1412 """Return list of nodes that are roots of subsets not in remote
1413 1413
1414 1414 If base dict is specified, assume that these nodes and their parents
1415 1415 exist on the remote side.
1416 1416 If a list of heads is specified, return only nodes which are heads
1417 1417 or ancestors of these heads, and return a second element which
1418 1418 contains all remote heads which get new children.
1419 1419 """
1420 1420 if base == None:
1421 1421 base = {}
1422 1422 self.findincoming(remote, base, heads, force=force)
1423 1423
1424 1424 self.ui.debug(_("common changesets up to ")
1425 1425 + " ".join(map(short, base.keys())) + "\n")
1426 1426
1427 1427 remain = dict.fromkeys(self.changelog.nodemap)
1428 1428
1429 1429 # prune everything remote has from the tree
1430 1430 del remain[nullid]
1431 1431 remove = base.keys()
1432 1432 while remove:
1433 1433 n = remove.pop(0)
1434 1434 if n in remain:
1435 1435 del remain[n]
1436 1436 for p in self.changelog.parents(n):
1437 1437 remove.append(p)
1438 1438
1439 1439 # find every node whose parents have been pruned
1440 1440 subset = []
1441 1441 # find every remote head that will get new children
1442 1442 updated_heads = {}
1443 1443 for n in remain:
1444 1444 p1, p2 = self.changelog.parents(n)
1445 1445 if p1 not in remain and p2 not in remain:
1446 1446 subset.append(n)
1447 1447 if heads:
1448 1448 if p1 in heads:
1449 1449 updated_heads[p1] = True
1450 1450 if p2 in heads:
1451 1451 updated_heads[p2] = True
1452 1452
1453 1453 # this is the set of all roots we have to push
1454 1454 if heads:
1455 1455 return subset, updated_heads.keys()
1456 1456 else:
1457 1457 return subset
1458 1458
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        heads, when given, restricts the pull to ancestors of those nodes;
        force allows pulling from an unrelated repository.  Returns the
        result of addchangegroup, or 0 when nothing was fetched.
        """
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            # when the server supports changegroupsubset, pull only up to
            # the remote heads we actually discovered
            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # drop the reference explicitly; presumably the lock object
            # releases itself from its destructor -- TODO confirm
            del lock
1483 1483
1484 1484 def push(self, remote, force=False, revs=None):
1485 1485 # there are two ways to push to remote repo:
1486 1486 #
1487 1487 # addchangegroup assumes local user can lock remote
1488 1488 # repo (local filesystem, old ssh servers).
1489 1489 #
1490 1490 # unbundle assumes local user cannot lock remote repo (new ssh
1491 1491 # servers, http servers).
1492 1492
1493 1493 if remote.capable('unbundle'):
1494 1494 return self.push_unbundle(remote, force, revs)
1495 1495 return self.push_addchangegroup(remote, force, revs)
1496 1496
    def prepush(self, remote, force, revs):
        """Analyse a prospective push and build its changegroup.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, code): code 1 means nothing to push, code 0
        means the push was refused (would create new remote heads and
        force is not set).
        """
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing to an empty repo can never add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head only
                        # if none of our outgoing heads descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1553 1553
1554 1554 def push_addchangegroup(self, remote, force, revs):
1555 1555 lock = remote.lock()
1556 1556 try:
1557 1557 ret = self.prepush(remote, force, revs)
1558 1558 if ret[0] is not None:
1559 1559 cg, remote_heads = ret
1560 1560 return remote.addchangegroup(cg, 'push', self.url())
1561 1561 return ret[1]
1562 1562 finally:
1563 1563 del lock
1564 1564
1565 1565 def push_unbundle(self, remote, force, revs):
1566 1566 # local repo finds heads on server, finds out what revs it
1567 1567 # must push. once revs transferred, if server finds it has
1568 1568 # different heads (someone else won commit/push race), server
1569 1569 # aborts.
1570 1570
1571 1571 ret = self.prepush(remote, force, revs)
1572 1572 if ret[0] is not None:
1573 1573 cg, remote_heads = ret
1574 1574 if force: remote_heads = ['force']
1575 1575 return remote.unbundle(cg, remote_heads, 'push')
1576 1576 return ret[1]
1577 1577
1578 1578 def changegroupinfo(self, nodes, source):
1579 1579 if self.ui.verbose or source == 'bundle':
1580 1580 self.ui.status(_("%d changesets found\n") % len(nodes))
1581 1581 if self.ui.debugflag:
1582 1582 self.ui.debug(_("list of changesets:\n"))
1583 1583 for node in nodes:
1584 1584 self.ui.debug("%s\n" % hex(node))
1585 1585
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        Returns a util.chunkbuffer wrapping the generated chunk stream.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version)
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        # key 1 is the manifest, already handled above
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1894 1894
1895 1895 def changegroup(self, basenodes, source):
1896 1896 # to avoid a race we use changegroupsubset() (issue1320)
1897 1897 return self.changegroupsubset(basenodes, self.heads(), source)
1898 1898
    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything the recipient is missing, plus the revision-number
        # set used to filter manifest/file revisions by linkrev
        nodes = cl.findmissing(common)
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset is its own changenode, so lookup is identity
        def identity(x):
            return x

        # yield the nodes of a revlog whose linkrev is in the outgoing set
        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        # closure that records every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # closure mapping a revlog node back to its changelog node
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # signal that no more groups follow
            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1967 1967
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # map an incoming changeset node to the changelog index it will
        # occupy (len(cl) at the time it is added)
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        # map a node to its (already stored) changelog revision number
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog revision counts before/after the group
            cor = len(cl) - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # give hooks a chance to veto before the transaction commits
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2070 2070
2071 2071
    def stream_in(self, remote):
        """Clone by copying raw store files streamed from *remote*.

        Protocol: the server first sends a status line (0 = ok,
        1 = forbidden, 2 = remote locking failed), then a
        "<total_files> <total_bytes>" line, then for each file a
        "<name>\\0<size>" header followed by exactly <size> bytes.
        Returns len(self.heads()) + 1.
        """
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # guard against a zero/negative clock delta in the rate math
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
2118 2118
2119 2119 def clone(self, remote, heads=[], stream=False):
2120 2120 '''clone remote repository.
2121 2121
2122 2122 keyword arguments:
2123 2123 heads: list of revs to clone (forces use of pull)
2124 2124 stream: use streaming clone if possible'''
2125 2125
2126 2126 # now, all clients that can request uncompressed clones can
2127 2127 # read repo formats supported by all servers that can serve
2128 2128 # them.
2129 2129
2130 2130 # if revlog format changes, client will have to check version
2131 2131 # and format flags on "stream" capability, and use
2132 2132 # uncompressed only if compatible.
2133 2133
2134 2134 if stream and not heads and remote.capable('stream'):
2135 2135 return self.stream_in(remote)
2136 2136 return self.pull(remote, heads)
2137 2137
2138 2138 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the renames queued in *files*.

    files is an iterable of (src, dest) pairs; they are snapshotted as
    tuples here so the returned closure holds no reference back to the
    caller's structures.
    """
    pending = [tuple(pair) for pair in files]
    def renameall():
        for src, dest in pending:
            util.rename(src, dest)
    return renameall
2145 2145
def instance(ui, path, create):
    """Open (or create) the local repository at *path* (file:// allowed)."""
    localpath = util.drop_scheme('file', path)
    return localrepository(ui, localpath, create)
2148 2148
def islocal(path):
    """A localrepository path is always local, whatever *path* contains."""
    return True
@@ -1,199 +1,200 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid
9 from revlog import revlog, RevlogError
9 from revlog import revlog
10 10 from i18n import _
11 import array, struct, mdiff, parsers, util
11 import array, struct, mdiff, parsers, util, error
12 12
class manifestdict(dict):
    """A filename -> node mapping that also carries per-file flags."""

    def __init__(self, mapping=None, flags=None):
        if mapping is None:
            mapping = {}
        if flags is None:
            flags = {}
        dict.__init__(self, mapping)
        # note: the flags dict is stored by reference, not copied
        self._flags = flags

    def flags(self, f):
        """Return the flags string for file f ('' when none recorded)."""
        return self._flags.get(f, "")

    def set(self, f, flags):
        """Record the flags string for file f."""
        self._flags[f] = flags

    def copy(self):
        """Shallow-copy both the mapping and the flag table."""
        return manifestdict(dict.copy(self), dict.copy(self._flags))
25 25
class manifest(revlog):
    """The manifest revlog: each revision is a changeset's file list.

    A manifest revision's text is a sorted sequence of
    "filename\\0<40-hex-node><flags>\\n" lines; read() parses that into a
    manifestdict, and add() writes a new revision, patching a cached copy
    of the previous text when possible instead of rebuilding it.
    """

    def __init__(self, opener):
        # mapcache: (node, manifestdict) for the last revision read
        # listcache: that revision's raw text as a char array
        self.mapcache = None
        self.listcache = None
        revlog.__init__(self, opener, "00manifest.i")

    def parse(self, lines):
        """Parse raw manifest text into a manifestdict via the C parser."""
        mfdict = manifestdict()
        parsers.parse_manifest(mfdict, mfdict._flags, lines)
        return mfdict

    def readdelta(self, node):
        """Parse only the delta of *node* against the preceding revision."""
        r = self.rev(node)
        return self.parse(mdiff.patchtext(self.revdiff(r - 1, r)))

    def read(self, node):
        """Return the manifestdict for *node*, refreshing the caches."""
        if node == nullid: return manifestdict() # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        self.listcache = array.array('c', text)
        mapping = self.parse(text)
        self.mapcache = (node, mapping)
        return mapping

    def _search(self, m, s, lo=0, hi=None):
        '''return a tuple (start, end) that says where to find s within m.

        If the string is found m[start:end] are the line containing
        that string.  If start == end the string was not found and
        they indicate the proper sorted insertion point.  This was
        taken from bisect_left, and modified to find line start/end as
        it goes along.

        m should be a buffer or a string
        s is a string'''
        def advance(i, c):
            # move i forward to the next occurrence of character c
            while i < lenm and m[i] != c:
                i += 1
            return i
        if not s:
            return (lo, lo)
        lenm = len(m)
        if not hi:
            hi = lenm
        while lo < hi:
            mid = (lo + hi) // 2
            start = mid
            # back up to the beginning of the line containing mid
            while start > 0 and m[start-1] != '\n':
                start -= 1
            end = advance(start, '\0')
            if m[start:end] < s:
                # we know that after the null there are 40 bytes of sha1
                # this translates to the bisect lo = mid + 1
                lo = advance(end + 40, '\n') + 1
            else:
                # this translates to the bisect hi = mid
                hi = start
        end = advance(lo, '\0')
        found = m[lo:end]
        if cmp(s, found) == 0:
            # we know that after the null there are 40 bytes of sha1
            end = advance(end + 40, '\n')
            return (lo, end+1)
        else:
            return (lo, lo)

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        if self.mapcache and node == self.mapcache[0]:
            return self.mapcache[1].get(f), self.mapcache[1].flags(f)
        text = self.revision(node)
        start, end = self._search(text, f)
        if start == end:
            return None, None
        l = text[start:end]
        f, n = l.split('\0')
        return bin(n[:40]), n[40:-1]

    def add(self, map, transaction, link, p1=None, p2=None,
            changed=None):
        """Store *map* as a new manifest revision linked to *link*.

        changed, when given, is a (added/modified files, removed files)
        pair; together with a valid listcache for p1 it allows building
        the new text by patching the cached copy.
        """
        # apply the changes collected during the bisect loop to our addlist
        # return a delta suitable for addrevision
        def addlistdelta(addlist, x):
            # start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(x)
            while i > 0:
                i -= 1
                start = x[i][0]
                end = x[i][1]
                if x[i][2]:
                    addlist[start:end] = array.array('c', x[i][2])
                else:
                    del addlist[start:end]
            return "".join([struct.pack(">lll", d[0], d[1], len(d[2])) + d[2]
                            for d in x ])

        def checkforbidden(l):
            # newlines and carriage returns would corrupt the line format
            for f in l:
                if '\n' in f or '\r' in f:
                    raise error.RevlogError(
                        _("'\\n' and '\\r' disallowed in filenames"))

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not (changed and self.listcache and p1 and self.mapcache[0] == p1):
            files = util.sort(map)
            checkforbidden(files)

            # if this is changed to support newlines in filenames,
            # be sure to check the templates/ dir again (especially *-raw.tmpl)
            text = ["%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                    for f in files]
            self.listcache = array.array('c', "".join(text))
            cachedelta = None
        else:
            addlist = self.listcache

            checkforbidden(changed[0])
            # combine the changed lists into one list for sorting
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            dstart = None
            dend = None
            dline = [""]
            start = 0
            # zero copy representation of addlist as a buffer
            addbuf = buffer(addlist)

            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                start, end = self._search(addbuf, f, start)
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]), map.flags(f))
                else:
                    l = ""
                if start == end and w[1] == 1:
                    # item we want to delete was not found, error out
                    raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                if dstart != None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart != None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart != None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the addlist, and get a delta for addrevision
            cachedelta = addlistdelta(addlist, delta)

            # the delta is only valid if we've been processing the tip revision
            if self.mapcache[0] != self.tip():
                cachedelta = None
            self.listcache = addlist

        n = self.addrevision(buffer(self.listcache), transaction, link,
                             p1, p2, cachedelta)
        self.mapcache = (n, map)

        return n
@@ -1,1370 +1,1360 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import bin, hex, nullid, nullrev, short
14 14 from i18n import _
15 15 import changegroup, errno, ancestor, mdiff, parsers
16 import struct, util, zlib
16 import struct, util, zlib, error
17 17
# local aliases for frequently used functions (faster LOAD_GLOBAL-free lookup)
_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 31
# These exception classes moved to error.py so that callers can catch
# them without importing revlog; the aliases below keep old
# "from revlog import RevlogError/LookupError" users working.
RevlogError = error.RevlogError
LookupError = error.LookupError
44 34
def getoffset(q):
    """Extract the data-file byte offset (the bits above the low 16)."""
    offset = q >> 16
    return int(offset)
47 37
def gettype(q):
    """Extract the type flags (the low 16 bits) of a packed field."""
    flags = q & 0xFFFF
    return int(flags)
50 40
def offset_type(offset, type):
    """Pack a byte offset and a 16-bit type into a single integer field."""
    shifted = long(offset) << 16
    return long(shifted | type)
53 43
def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # the parents are hashed in sorted order so p1/p2 order is irrelevant
    first, second = sorted([p1, p2])
    s = _sha(first)
    s.update(second)
    s.update(text)
    return s.digest()
67 57
def compress(text):
    """Return a (marker, data) pair for a possibly-compressed form of text.

    marker is '' when data is zlib-compressed (or is empty / NUL-prefixed
    plain text stored verbatim) and 'u' when data is stored uncompressed.
    """
    if not text:
        return ("", text)
    size = len(text)
    compressed = None
    if size < 44:
        # too small for compression to ever pay off
        pass
    elif size > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        pieces = []
        pos = 0
        while pos < size:
            nextpos = pos + 2**20
            pieces.append(z.compress(text[pos:nextpos]))
            pos = nextpos
        pieces.append(z.flush())
        if sum(map(len, pieces)) < size:
            compressed = "".join(pieces)
    else:
        compressed = _compress(text)
    if compressed is None or len(compressed) > size:
        # compression did not help: store plain, marking with 'u' unless
        # the text already starts with the NUL sentinel
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", compressed)
96 86
def decompress(bin):
    """Reverse compress(): expand data according to its leading marker byte."""
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # NUL prefix: stored verbatim
        return bin
    if marker == 'x':
        # start of a zlib stream
        return _decompress(bin)
    if marker == 'u':
        # stored uncompressed behind a one-byte marker
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % marker)
109 99
class lazyparser(object):
    """
    this class avoids the need to parse the entirety of large indices
    """

    # lazyparser is not safe to use on windows if win32 extensions not
    # available. it keeps file handle open, which make it not possible
    # to break hardlinks on local cloned repos.

    def __init__(self, dataf, size):
        # dataf: open index file; size: its size in bytes
        self.dataf = dataf
        self.s = struct.calcsize(indexformatng)      # size of one entry
        self.datasize = size
        self.l = size/self.s                         # number of entries
        self.index = [None] * self.l                 # raw entries, loaded lazily
        self.map = {nullid: nullrev}                 # node -> rev, filled lazily
        self.allmap = 0                              # 1 once map is complete
        self.all = 0                                 # 1 once index is complete
        self.mapfind_count = 0                       # findnode call counter

    def loadmap(self):
        """
        during a commit, we need to make sure the rev being added is
        not a duplicate.  This requires loading the entire index,
        which is fairly slow. loadmap can load up just the node map,
        which takes much less time.
        """
        if self.allmap:
            return
        end = self.datasize
        self.allmap = 1
        cur = 0
        count = 0
        blocksize = self.s * 256
        self.dataf.seek(0)
        while cur < end:
            data = self.dataf.read(blocksize)
            off = 0
            for x in xrange(256):
                # only the 20-byte sha at ngshaoffset is extracted per entry
                n = data[off + ngshaoffset:off + ngshaoffset + 20]
                self.map[n] = count
                count += 1
                if count >= self.l:
                    break
                off += self.s
            cur += blocksize

    def loadblock(self, blockstart, blocksize, data=None):
        # materialize the raw index entries covering the given byte range,
        # reading from the file unless data was already fetched by the caller
        if self.all:
            return
        if data is None:
            self.dataf.seek(blockstart)
            if blockstart + blocksize > self.datasize:
                # the revlog may have grown since we've started running,
                # but we don't have space in self.index for more entries.
                # limit blocksize so that we don't get too much data.
                blocksize = max(self.datasize - blockstart, 0)
            data = self.dataf.read(blocksize)
        lend = len(data) / self.s
        i = blockstart / self.s
        off = 0
        # lazyindex supports __delitem__
        if lend > len(self.index) - i:
            lend = len(self.index) - i
        for x in xrange(lend):
            if self.index[i + x] == None:
                b = data[off : off + self.s]
                self.index[i + x] = b
                n = b[ngshaoffset:ngshaoffset + 20]
                self.map[n] = i + x
            off += self.s

    def findnode(self, node):
        """search backwards through the index file for a specific node"""
        if self.allmap:
            return None

        # hg log will cause many many searches for the manifest
        # nodes.  After we get called a few times, just load the whole
        # thing.
        if self.mapfind_count > 8:
            self.loadmap()
            if node in self.map:
                return node
            return None
        self.mapfind_count += 1
        last = self.l - 1
        # skip entries already loaded at the tail
        while self.index[last] != None:
            if last == 0:
                self.all = 1
                self.allmap = 1
                return None
            last -= 1
        end = (last + 1) * self.s
        blocksize = self.s * 256
        while end >= 0:
            start = max(end - blocksize, 0)
            self.dataf.seek(start)
            data = self.dataf.read(end - start)
            findend = end - start
            while True:
                # we're searching backwards, so we have to make sure
                # we don't find a changeset where this node is a parent
                off = data.find(node, 0, findend)
                findend = off
                if off >= 0:
                    i = off / self.s
                    off = i * self.s
                    n = data[off + ngshaoffset:off + ngshaoffset + 20]
                    if n == node:
                        self.map[n] = i + start / self.s
                        return node
                else:
                    break
            end -= blocksize
        return None

    def loadindex(self, i=None, end=None):
        # load index entries: everything when i is None, the range [i, end)
        # when end is given, otherwise the 1024-entry block containing i
        if self.all:
            return
        all = False
        if i == None:
            blockstart = 0
            blocksize = (65536 / self.s) * self.s
            end = self.datasize
            all = True
        else:
            if end:
                blockstart = i * self.s
                end = end * self.s
                blocksize = end - blockstart
            else:
                blockstart = (i & ~1023) * self.s
                blocksize = self.s * 1024
                end = blockstart + blocksize
        while blockstart < end:
            self.loadblock(blockstart, blocksize)
            blockstart += blocksize
        if all:
            self.all = True
250 240
class lazyindex(object):
    """A list-like view of the index that unpacks entries on demand."""

    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        """Force the parser to materialize the raw entry at pos, return it."""
        if pos < 0:
            # normalize a negative index against the current length
            pos += len(self.p.index)
        self.p.loadindex(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        raw = self.p.index[pos] or self.load(pos)
        return _unpack(indexformatng, raw)

    def __setitem__(self, pos, item):
        self.p.index[pos] = _pack(indexformatng, *item)

    def __delitem__(self, pos):
        del self.p.index[pos]

    def insert(self, pos, e):
        self.p.index.insert(pos, _pack(indexformatng, *e))

    def append(self, e):
        self.p.index.append(_pack(indexformatng, *e))
272 262
class lazymap(object):
    """a lazy version of the node map"""
    def __init__(self, parser):
        self.p = parser
    def load(self, key):
        # ask the parser to locate the node on disk; findnode fills p.map
        # as a side effect, so a subsequent lookup will succeed
        n = self.p.findnode(key)
        if n == None:
            raise KeyError(key)
    def __contains__(self, key):
        # fast path on what is already mapped, then load the full map
        if key in self.p.map:
            return True
        self.p.loadmap()
        return key in self.p.map
    def __iter__(self):
        # yields every node id, starting with the virtual nullid entry
        yield nullid
        for i in xrange(self.p.l):
            ret = self.p.index[i]
            if not ret:
                self.p.loadindex(i)
                ret = self.p.index[i]
            if isinstance(ret, str):
                # still raw bytes: unpack to get at the node field
                ret = _unpack(indexformatng, ret)
            yield ret[7]
    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            # not mapped yet: try a disk search before giving up
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))
    def __setitem__(self, key, val):
        self.p.map[key] = val
    def __delitem__(self, key):
        del self.p.map[key]
309 299
# v0 index entry: offset, size, base, linkrev (4 signed 32-bit ints)
# followed by the 20-byte sha1s of p1, p2 and the entry's own node
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56    # byte offset of the node sha1 within a v0 entry (16 + 20 + 20)
312 302
class revlogoldio(object):
    """Index reader/writer for the original (v0) revlog format."""

    def __init__(self):
        # fixed byte size of one v0 index entry
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, fp, inline):
        """Parse a whole v0 index file into (index, nodemap, None),
        converting each entry to the in-memory revlogng tuple layout."""
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        data = fp.read()
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        """Serialize an in-memory entry back to the on-disk v0 layout;
        node is a rev -> node-id lookup used for the parent fields."""
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)
341 331
# index ng:
# 6 bytes offset
# 2 bytes flags
# 4 bytes compressed length
# 4 bytes uncompressed length
# 4 bytes: base rev
# 4 bytes link rev
# 4 bytes parent 1 rev
# 4 bytes parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32        # byte offset of the nodeid within an ng entry
versionformat = ">I"    # version header occupying the first 4 bytes of entry 0
355 345
class revlogio(object):
    """Index reader/writer for the revlogng format."""

    def __init__(self):
        # fixed byte size of one ng index entry
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, fp, inline):
        """Parse an ng index file into (index, nodemap, cache).

        Large non-inline indices are wrapped in lazyindex/lazymap so they
        are only parsed on demand; otherwise the C parser is used.
        """
        try:
            size = util.fstat(fp).st_size
        except AttributeError:
            size = 0

        if util.openhardlinks() and not inline and size > 1000000:
            # big index, let's parse it on demand
            parser = lazyparser(fp, size)
            index = lazyindex(parser)
            nodemap = lazymap(parser)
            e = list(index[0])
            type = gettype(e[0])
            # entry 0's offset field doubles as the version header on disk;
            # reset the offset part so in-memory consumers see offset 0
            e[0] = offset_type(0, type)
            index[0] = e
            return index, nodemap, None

        data = fp.read()
        # call the C implementation to parse the index data
        index, nodemap, cache = parsers.parse_index(data, inline)
        return index, nodemap, cache

    def packentry(self, entry, node, version, rev):
        """Serialize an entry; entry 0 gets the version stamped into its
        first 4 bytes."""
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
387 377
388 378 class revlog(object):
389 379 """
390 380 the underlying revision storage object
391 381
392 382 A revlog consists of two parts, an index and the revision data.
393 383
394 384 The index is a file with a fixed record size containing
395 385 information on each revision, including its nodeid (hash), the
396 386 nodeids of its parents, the position and offset of its data within
397 387 the data file, and the revision it's based on. Finally, each entry
398 388 contains a linkrev entry that can serve as a pointer to external
399 389 data.
400 390
401 391 The revision data itself is a linear collection of data chunks.
402 392 Each chunk represents a revision and is usually represented as a
403 393 delta against the previous chunk. To bound lookup time, runs of
404 394 deltas are limited to about 2 times the length of the original
405 395 version data. This makes retrieval of a version proportional to
406 396 its size, or O(1) relative to the number of revisions.
407 397
408 398 Both pieces of the revlog are written to in an append-only
409 399 fashion, which means we never need to rewrite a file to insert or
410 400 remove data, and can use some simple techniques to avoid the need
411 401 for locking while reading.
412 402 """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        # "xxx.i" -> "xxx.d" for the separate data file
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._chunkcache = None
        self.nodemap = {nullid: nullrev}
        self.index = []

        v = REVLOG_DEFAULT_VERSION
        if hasattr(opener, "defversion"):
            v = opener.defversion
        if v & REVLOGNG:
            v |= REVLOGNGINLINEDATA

        i = ""
        try:
            f = self.opener(self.indexfile)
            # the first 4 bytes of a non-empty ng index are the version header
            i = f.read(4)
            f.seek(0)
            if len(i) > 0:
                v = struct.unpack(versionformat, i)[0]
        except IOError, inst:
            # a missing index just means an empty revlog; anything else is real
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        if i:
            d = self._io.parseindex(f, self._inline)
            self.index, self.nodemap, self._chunkcache = d

        # add the magic null revision at -1 (if it hasn't been done already)
        if (self.index == [] or isinstance(self.index, lazyindex) or
            self.index[-1][7] != nullid) :
            self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
470 460
471 461 def _loadindex(self, start, end):
472 462 """load a block of indexes all at once from the lazy parser"""
473 463 if isinstance(self.index, lazyindex):
474 464 self.index.p.loadindex(start, end)
475 465
476 466 def _loadindexmap(self):
477 467 """loads both the map and the index from the lazy parser"""
478 468 if isinstance(self.index, lazyindex):
479 469 p = self.index.p
480 470 p.loadindex()
481 471 self.nodemap = p.map
482 472
483 473 def _loadmap(self):
484 474 """loads the map from the lazy parser"""
485 475 if isinstance(self.nodemap, lazymap):
486 476 self.nodemap.p.loadmap()
487 477 self.nodemap = self.nodemap.p.map
488 478
489 479 def tip(self):
490 480 return self.node(len(self.index) - 2)
491 481 def __len__(self):
492 482 return len(self.index) - 1
493 483 def __iter__(self):
494 484 for i in xrange(len(self)):
495 485 yield i
496 486 def rev(self, node):
497 487 try:
498 488 return self.nodemap[node]
499 489 except KeyError:
500 490 raise LookupError(node, self.indexfile, _('no node'))
501 491 def node(self, rev):
502 492 return self.index[rev][7]
503 493 def linkrev(self, rev):
504 494 return self.index[rev][4]
505 495 def parents(self, node):
506 496 i = self.index
507 497 d = i[self.rev(node)]
508 498 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
509 499 def parentrevs(self, rev):
510 500 return self.index[rev][5:7]
511 501 def start(self, rev):
512 502 return int(self.index[rev][0] >> 16)
513 503 def end(self, rev):
514 504 return self.start(rev) + self.length(rev)
515 505 def length(self, rev):
516 506 return self.index[rev][1]
517 507 def base(self, rev):
518 508 return self.index[rev][3]
519 509
    def size(self, rev):
        """return the length of the uncompressed text for a given revision"""
        # index field 2 caches the uncompressed length; a negative value
        # means unknown, so fall back to reconstructing the full text
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)

    # alternate implementation, The advantage to this code is it
    # will be faster for a single revision.  But, the results are not
    # cached, so finding the size of every revision will be slower.
    """
    if self.cache and self.cache[1] == rev:
        return len(self.cache[2])

    base = self.base(rev)
    if self.cache and self.cache[1] >= base and self.cache[1] < rev:
        base = self.cache[1]
        text = self.cache[2]
    else:
        text = self.revision(self.node(base))

    l = len(text)
    for x in xrange(base + 1, rev + 1):
        l = mdiff.patchedsize(l, self.chunk(x))
    return l
    """
548 538
549 539 def reachable(self, node, stop=None):
550 540 """return a hash of all nodes ancestral to a given node, including
551 541 the node itself, stopping when stop is matched"""
552 542 reachable = {}
553 543 visit = [node]
554 544 reachable[node] = 1
555 545 if stop:
556 546 stopn = self.rev(stop)
557 547 else:
558 548 stopn = 0
559 549 while visit:
560 550 n = visit.pop(0)
561 551 if n == stop:
562 552 continue
563 553 if n == nullid:
564 554 continue
565 555 for p in self.parents(n):
566 556 if self.rev(p) < stopn:
567 557 continue
568 558 if p not in reachable:
569 559 reachable[p] = 1
570 560 visit.append(p)
571 561 return reachable
572 562
573 563 def ancestors(self, *revs):
574 564 'Generate the ancestors of revs using a breadth-first visit'
575 565 visit = list(revs)
576 566 seen = util.set([nullrev])
577 567 while visit:
578 568 for parent in self.parentrevs(visit.pop(0)):
579 569 if parent not in seen:
580 570 visit.append(parent)
581 571 seen.add(parent)
582 572 yield parent
583 573
584 574 def descendants(self, *revs):
585 575 'Generate the descendants of revs in topological order'
586 576 seen = util.set(revs)
587 577 for i in xrange(min(revs) + 1, len(self)):
588 578 for x in self.parentrevs(i):
589 579 if x != nullrev and x in seen:
590 580 seen.add(i)
591 581 yield i
592 582 break
593 583
594 584 def findmissing(self, common=None, heads=None):
595 585 '''
596 586 returns the topologically sorted list of nodes from the set:
597 587 missing = (ancestors(heads) \ ancestors(common))
598 588
599 589 where ancestors() is the set of ancestors from heads, heads included
600 590
601 591 if heads is None, the heads of the revlog are used
602 592 if common is None, nullid is assumed to be a common node
603 593 '''
604 594 if common is None:
605 595 common = [nullid]
606 596 if heads is None:
607 597 heads = self.heads()
608 598
609 599 common = [self.rev(n) for n in common]
610 600 heads = [self.rev(n) for n in heads]
611 601
612 602 # we want the ancestors, but inclusive
613 603 has = dict.fromkeys(self.ancestors(*common))
614 604 has[nullrev] = None
615 605 for r in common:
616 606 has[r] = None
617 607
618 608 # take all ancestors from heads that aren't in has
619 609 missing = {}
620 610 visit = [r for r in heads if r not in has]
621 611 while visit:
622 612 r = visit.pop(0)
623 613 if r in missing:
624 614 continue
625 615 else:
626 616 missing[r] = None
627 617 for p in self.parentrevs(r):
628 618 if p not in has:
629 619 visit.append(p)
630 620 missing = missing.keys()
631 621 missing.sort()
632 622 return [self.node(r) for r in missing]
633 623
    def nodesbetween(self, roots=None, heads=None):
        """Return a tuple containing three elements. Elements 1 and 2 contain
        a final list bases and heads after all the unreachable ones have been
        pruned.  Element 0 contains a topologically sorted list of all
        nodes that satisfy these constraints:
        1. All nodes must be descended from a node in roots (the nodes on
        roots are considered descended from themselves).
        2. All nodes must also be ancestors of a node in heads (the nodes in
        heads are considered to be their own ancestors).

        If roots is unspecified, nullid is assumed as the only root.
        If heads is unspecified, it is taken to be the output of the
        heads method (i.e. a list of all nodes in the repository that
        have no children)."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = {}
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, 0)
            # Start at the top and keep marking parents until we're done.
            nodestotag = heads.keys()
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors[n] = 1 # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.extend([p for p in self.parents(n) if
                                           p != nullid])
                elif n in heads: # We've seen it before, is it a fake head?
                    # So it is, real heads should not be the ancestors of
                    # any other heads.
                    heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots?  Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a 'set' (i.e. a dictionary where the
        # values don't matter).
        descendents = dict.fromkeys(roots, 1)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev:  # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.pop(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents.  (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents[n] = 1
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = 1
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = 1
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        # only heads that were actually reached (value != 0) survive
        heads = [n for n in heads.iterkeys() if heads[n] != 0]
        roots = roots.keys()
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)
783 773
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # Fast path: heads of the entire revlog.
            count = len(self)
            if not count:
                return [nullid]
            # ishead[r] stays 1 until rev r is seen as somebody's parent.
            # One extra slot so nullrev (-1) parents harmlessly clear the
            # last entry instead of raising IndexError.
            ishead = [1] * (count + 1)
            index = self.index
            for r in xrange(count):
                e = index[r]
                # e[5] and e[6] are the two parent revisions of rev r
                ishead[e[5]] = ishead[e[6]] = 0
            return [self.node(r) for r in xrange(count) if ishead[r]]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = dict.fromkeys([self.rev(n) for n in stop])
        startrev = self.rev(start)
        # Walk forward from startrev, marking revs reachable from start;
        # a reachable rev remains a candidate head until one of its
        # children is itself found reachable.
        reachable = {startrev: 1}
        heads = {startrev: 1}

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable[r] = 1
                        heads[r] = 1
                    # p now has a reachable child, so it is no longer a
                    # head -- unless it was explicitly listed in stop.
                    if p in heads and p not in stoprevs:
                        del heads[p]

        return [self.node(r) for r in heads]
823 813
824 814 def children(self, node):
825 815 """find the children of a given node"""
826 816 c = []
827 817 p = self.rev(node)
828 818 for r in range(p + 1, len(self)):
829 819 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
830 820 if prevs:
831 821 for pr in prevs:
832 822 if pr == p:
833 823 c.append(self.node(r))
834 824 elif p == nullrev:
835 825 c.append(self.node(r))
836 826 return c
837 827
    def _match(self, id):
        # Resolve an exact revision identifier: an integer rev, a binary
        # nodeid, a decimal rev string, or a full 40-char hex nodeid.
        # Returns the binary node, or falls off the end (None) so the
        # caller can try a partial-hex match instead.
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                r = self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                # negative revs count back from the tip, like list indexing
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                r = self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
871 861
    def _partialmatch(self, id):
        # Resolve a hex nodeid prefix shorter than 40 characters.
        # Returns the unique matching node, None when nothing matches,
        # and raises LookupError when the prefix is ambiguous.
        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) / 2 # grab an even number of digits
                bin_id = bin(id[:l*2])
                # coarse scan on the even-length binary prefix...
                nl = [n for n in self.nodemap if n[:l] == bin_id]
                # ...then narrow with the full (possibly odd-length) prefix
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                # bin() rejects non-hex input: not a node prefix at all
                pass
888 878
889 879 def lookup(self, id):
890 880 """locate a node based on:
891 881 - revision number or str(revision number)
892 882 - nodeid or subset of hex nodeid
893 883 """
894 884 n = self._match(id)
895 885 if n is not None:
896 886 return n
897 887 n = self._partialmatch(id)
898 888 if n:
899 889 return n
900 890
901 891 raise LookupError(id, self.indexfile, _('no match found'))
902 892
903 893 def cmp(self, node, text):
904 894 """compare text with a given file revision"""
905 895 p1, p2 = self.parents(node)
906 896 return hash(text, p1, p2) != node
907 897
    def chunk(self, rev, df=None):
        # Return the decompressed raw chunk for rev, served from (and
        # refilling) the single-window chunk cache.  df, when given, is
        # an already-open data file to read from.
        def loadcache(df):
            # (re)fill the cache with cache_length bytes starting at
            # start; both names come from the enclosing scope.
            if not df:
                if self._inline:
                    df = self.opener(self.indexfile)
                else:
                    df = self.opener(self.datafile)
            df.seek(start)
            self._chunkcache = (start, df.read(cache_length))

        start, length = self.start(rev), self.length(rev)
        if self._inline:
            # inline revlogs interleave one index entry before each chunk
            start += (rev + 1) * self._io.size
        end = start + length

        offset = 0
        if not self._chunkcache:
            cache_length = max(65536, length)
            loadcache(df)
        else:
            cache_start = self._chunkcache[0]
            cache_length = len(self._chunkcache[1])
            cache_end = cache_start + cache_length
            if start >= cache_start and end <= cache_end:
                # it is cached
                offset = start - cache_start
            else:
                cache_length = max(65536, length)
                loadcache(df)

        # avoid copying large chunks
        c = self._chunkcache[1]
        if cache_length != length:
            c = c[offset:offset + length]

        return decompress(c)
944 934
945 935 def revdiff(self, rev1, rev2):
946 936 """return or calculate a delta between two revisions"""
947 937 if rev1 + 1 == rev2 and self.base(rev1) == self.base(rev2):
948 938 return self.chunk(rev2)
949 939
950 940 return mdiff.textdiff(self.revision(self.node(rev1)),
951 941 self.revision(self.node(rev2)))
952 942
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        if node == nullid:
            return ""
        if self._cache and self._cache[0] == node:
            return str(self._cache[2])

        # look up what we need to read
        text = None
        rev = self.rev(node)
        base = self.base(rev)  # first rev of this delta chain

        # check rev flags
        if self.index[rev][0] & 0xFFFF:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.index[rev][0] & 0xFFFF))

        df = None

        # do we have useful data cached?
        if self._cache and self._cache[1] >= base and self._cache[1] < rev:
            # cached text lies on our delta chain: patch forward from it
            base = self._cache[1]
            text = str(self._cache[2])
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base + 1:
                df = self.opener(self.datafile)
        else:
            self._loadindex(base, rev + 1)
            if not self._inline and rev > base:
                df = self.opener(self.datafile)
            text = self.chunk(base, df=df)

        # apply the remaining deltas of the chain on top of the base text
        bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
        text = mdiff.patches(text, bins)
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.datafile, rev))

        self._cache = (node, rev, text)
        return text
994 984
    def checkinlinesize(self, tr, fp=None):
        # Migrate an inline revlog to separate index/data files once the
        # combined file grows past 128k, rewriting both files and telling
        # the transaction so a rollback stays consistent.
        if not self._inline:
            return
        if not fp:
            fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
        size = fp.tell()
        if size < 131072:
            return
        trinfo = tr.find(self.indexfile)
        if trinfo == None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)
        df = self.opener(self.datafile, 'w')
        try:
            calc = self._io.size
            # copy every interleaved chunk out into the new data file
            for r in self:
                start = self.start(r) + (r + 1) * calc
                length = self.length(r)
                fp.seek(start)
                d = fp.read(length)
                df.write(d)
        finally:
            df.close()

        fp.close()
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        # clear the inline flag before repacking the index entries
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * calc)
        self._chunkcache = None  # cached offsets are stale after the rewrite
1039 1029
1040 1030 def addrevision(self, text, transaction, link, p1, p2, d=None):
1041 1031 """add a revision to the log
1042 1032
1043 1033 text - the revision data to add
1044 1034 transaction - the transaction object used for rollback
1045 1035 link - the linkrev data to add
1046 1036 p1, p2 - the parent nodeids of the revision
1047 1037 d - an optional precomputed delta
1048 1038 """
1049 1039 dfh = None
1050 1040 if not self._inline:
1051 1041 dfh = self.opener(self.datafile, "a")
1052 1042 ifh = self.opener(self.indexfile, "a+")
1053 1043 try:
1054 1044 return self._addrevision(text, transaction, link, p1, p2, d, ifh, dfh)
1055 1045 finally:
1056 1046 if dfh:
1057 1047 dfh.close()
1058 1048 ifh.close()
1059 1049
    def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
        # Internal write path for addrevision/addgroup: append one
        # revision through the already-open index (ifh) and data (dfh)
        # handles.  Returns the node (the existing one if this exact
        # revision is already stored).
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        curr = len(self)
        prev = curr - 1
        base = self.base(prev)
        offset = self.end(prev)

        if curr:
            if not d:
                # no precomputed delta: diff against the previous revision
                ptext = self.revision(self.node(prev))
                d = mdiff.textdiff(ptext, text)
            data = compress(d)
            l = len(data[1]) + len(data[0])
            dist = l + offset - self.start(base)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not curr or dist > len(text) * 2:
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, 0), l, len(text),
             base, link, self.rev(p1), self.rev(p2), node)
        # insert before the sentinel entry kept at the end of the index
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            # inline: entry and chunk go into the index file together
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        self._cache = (node, curr, text)
        return node
1109 1099
1110 1100 def ancestor(self, a, b):
1111 1101 """calculate the least common ancestor of nodes a and b"""
1112 1102
1113 1103 def parents(rev):
1114 1104 return [p for p in self.parentrevs(rev) if p != nullrev]
1115 1105
1116 1106 c = ancestor.ancestor(self.rev(a), self.rev(b), parents)
1117 1107 if c is None:
1118 1108 return nullid
1119 1109
1120 1110 return self.node(c)
1121 1111
    def group(self, nodelist, lookup, infocollect=None):
        """calculate a delta group

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. the first delta is
        parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        have this parent as it has all history before these
        changesets. parent is parent[0]
        """
        revs = [self.rev(n) for n in nodelist]

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield changegroup.closechunk()
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # build deltas
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            nb = self.node(b)

            if infocollect is not None:
                infocollect(nb)

            p = self.parents(nb)
            # chunk metadata: node, both parents, and the linkrev node
            meta = nb + p[0] + p[1] + lookup(nb)
            if a == -1:
                # no base revision: ship the full text behind a header
                # describing a trivial (whole-file) diff
                d = self.revision(nb)
                meta += mdiff.trivialdiffheader(len(d))
            else:
                d = self.revdiff(a, b)
            yield changegroup.chunkheader(len(meta) + len(d))
            yield meta
            if len(d) > 2**20:
                # stream deltas larger than 1M in 256k slices
                pos = 0
                while pos < len(d):
                    pos2 = pos + 2 ** 18
                    yield d[pos:pos2]
                    pos = pos2
            else:
                yield d

        yield changegroup.closechunk()
1169 1159
    def addgroup(self, revs, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        #track the base of the current delta log
        r = len(self)
        t = r - 1
        node = None

        base = prev = nullrev
        start = end = textlen = 0
        if r:
            end = self.end(t)

        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            for chunk in revs:
                # each chunk: 80 bytes of metadata (node, p1, p2,
                # changeset node), then the delta payload
                node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue
                delta = buffer(chunk, 80)
                del chunk

                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile, _('unknown parent'))

                if not chain:
                    # retrieve the parent revision of the delta chain
                    chain = p1
                    if not chain in self.nodemap:
                        raise LookupError(chain, self.indexfile, _('unknown base'))

                # full versions are inserted when the needed deltas become
                # comparable to the uncompressed text or when the previous
                # version is not the one we have a delta against. We use
                # the size of the previous full rev as a proxy for the
                # current size.

                if chain == prev:
                    cdelta = compress(delta)
                    cdeltalen = len(cdelta[0]) + len(cdelta[1])
                    textlen = mdiff.patchedsize(textlen, delta)

                if chain != prev or (end - start + cdeltalen) > textlen * 2:
                    # flush our writes here so we can read it in revision
                    if dfh:
                        dfh.flush()
                    ifh.flush()
                    # rebuild the full text and store via _addrevision,
                    # which decides between a delta and a full version
                    text = self.revision(chain)
                    if len(text) == 0:
                        # skip over trivial delta header
                        text = buffer(delta, 12)
                    else:
                        text = mdiff.patches(text, [delta])
                    del delta
                    chk = self._addrevision(text, transaction, link, p1, p2, None,
                                            ifh, dfh)
                    if not dfh and not self._inline:
                        # addrevision switched from inline to conventional
                        # reopen the index
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                    if chk != node:
                        raise RevlogError(_("consistency error adding group"))
                    textlen = len(text)
                else:
                    # append the compressed delta directly
                    e = (offset_type(end, 0), cdeltalen, textlen, base,
                         link, self.rev(p1), self.rev(p2), node)
                    self.index.insert(-1, e)
                    self.nodemap[node] = r
                    entry = self._io.packentry(e, self.node, self.version, r)
                    if self._inline:
                        ifh.write(entry)
                        ifh.write(cdelta[0])
                        ifh.write(cdelta[1])
                        self.checkinlinesize(transaction, ifh)
                        if not self._inline:
                            # checkinlinesize migrated us off inline storage
                            dfh = self.opener(self.datafile, "a")
                            ifh = self.opener(self.indexfile, "a")
                    else:
                        dfh.write(cdelta[0])
                        dfh.write(cdelta[1])
                        ifh.write(entry)

                # advance the chain bookkeeping for the next delta
                t, r, chain, prev = r, r + 1, node, node
                base = self.base(t)
                start = self.start(base)
                end = self.end(t)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node
1284 1274
    def strip(self, minlink):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if len(self) == 0:
            return

        if isinstance(self.index, lazyindex):
            self._loadindexmap()

        # find the first rev to strip (index field 4 is the linkrev)
        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return  # no rev reaches minlink: nothing to strip

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            df = self.opener(self.datafile, "a")
            df.truncate(end)
            end = rev * self._io.size
        else:
            # inline data: index entries are interleaved with the chunks
            end += rev * self._io.size

        indexf = self.opener(self.indexfile, "a")
        indexf.truncate(end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkcache = None
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        # drop the stripped entries but keep the trailing sentinel
        del self.index[rev:-1]
1330 1320
    def checksize(self):
        # Return (dd, di): how many bytes the data file and index file
        # respectively deviate from their expected sizes.  Zero means
        # consistent; a missing file counts as consistent (ENOENT -> 0).
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            s = self._io.size
            i = max(0, actual / s)
            # di: bytes beyond a whole number of index entries
            di = actual - (i * s)
            if self._inline:
                # inline: the file holds entries plus interleaved chunks
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
1365 1355
1366 1356 def files(self):
1367 1357 res = [ self.indexfile ]
1368 1358 if not self._inline:
1369 1359 res.append(self.datafile)
1370 1360 return res
General Comments 0
You need to be logged in to leave comments. Login now