merge with crew
Matt Mackall
r14274:01472f8f merge default
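
For orientation, a standalone sketch (not part of the changeset) of the prefix matching that parsealiases()/findpossible() in the hunk below implement: command-table keys such as '^commit|ci' bundle aliases separated by '|' (the leading '^' is simply stripped by parsealiases), and in non-strict mode any prefix of an alias matches. The toy table entries here are hypothetical, and the debug-command special case is omitted for brevity.

# Simplified, runnable sketch of the resolution behaviour; table values are
# placeholders, not real Mercurial command definitions.
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

def findpossible(cmd, table, strict=False):
    choice = {}
    for e in table.keys():
        aliases = parsealiases(e)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            choice[found] = (aliases, table[e])
    return choice

table = {'^commit|ci': 'commit entry', 'copy|cp': 'copy entry'}
print(findpossible('ci', table))  # exact alias match -> one choice
print(findpossible('co', table))  # prefix of 'commit' and 'copy' -> two
                                  # choices; findcmd() would raise
                                  # AmbiguousCommand for this
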
@@ -1,1253 +1,1253 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, glob, tempfile
10 import os, sys, errno, re, tempfile
11 import util, scmutil, templater, patch, error, templatekw, wdutil
11 import util, scmutil, templater, patch, error, templatekw, wdutil
12 import match as matchmod
12 import match as matchmod
13 import similar, revset, subrepo
13 import revset, subrepo
14
14
15 expandpats = wdutil.expandpats
15 expandpats = wdutil.expandpats
16 match = wdutil.match
16 match = wdutil.match
17 matchall = wdutil.matchall
17 matchall = wdutil.matchall
18 matchfiles = wdutil.matchfiles
18 matchfiles = wdutil.matchfiles
19 addremove = wdutil.addremove
19 addremove = wdutil.addremove
20 dirstatecopy = wdutil.dirstatecopy
20 dirstatecopy = wdutil.dirstatecopy
21
21
22 revrangesep = ':'
22 revrangesep = ':'
23
23
24 def parsealiases(cmd):
24 def parsealiases(cmd):
25 return cmd.lstrip("^").split("|")
25 return cmd.lstrip("^").split("|")
26
26
27 def findpossible(cmd, table, strict=False):
27 def findpossible(cmd, table, strict=False):
28 """
28 """
29 Return cmd -> (aliases, command table entry)
29 Return cmd -> (aliases, command table entry)
30 for each matching command.
30 for each matching command.
31 Return debug commands (or their aliases) only if no normal command matches.
31 Return debug commands (or their aliases) only if no normal command matches.
32 """
32 """
33 choice = {}
33 choice = {}
34 debugchoice = {}
34 debugchoice = {}
35 for e in table.keys():
35 for e in table.keys():
36 aliases = parsealiases(e)
36 aliases = parsealiases(e)
37 found = None
37 found = None
38 if cmd in aliases:
38 if cmd in aliases:
39 found = cmd
39 found = cmd
40 elif not strict:
40 elif not strict:
41 for a in aliases:
41 for a in aliases:
42 if a.startswith(cmd):
42 if a.startswith(cmd):
43 found = a
43 found = a
44 break
44 break
45 if found is not None:
45 if found is not None:
46 if aliases[0].startswith("debug") or found.startswith("debug"):
46 if aliases[0].startswith("debug") or found.startswith("debug"):
47 debugchoice[found] = (aliases, table[e])
47 debugchoice[found] = (aliases, table[e])
48 else:
48 else:
49 choice[found] = (aliases, table[e])
49 choice[found] = (aliases, table[e])
50
50
51 if not choice and debugchoice:
51 if not choice and debugchoice:
52 choice = debugchoice
52 choice = debugchoice
53
53
54 return choice
54 return choice
55
55
56 def findcmd(cmd, table, strict=True):
56 def findcmd(cmd, table, strict=True):
57 """Return (aliases, command table entry) for command string."""
57 """Return (aliases, command table entry) for command string."""
58 choice = findpossible(cmd, table, strict)
58 choice = findpossible(cmd, table, strict)
59
59
60 if cmd in choice:
60 if cmd in choice:
61 return choice[cmd]
61 return choice[cmd]
62
62
63 if len(choice) > 1:
63 if len(choice) > 1:
64 clist = choice.keys()
64 clist = choice.keys()
65 clist.sort()
65 clist.sort()
66 raise error.AmbiguousCommand(cmd, clist)
66 raise error.AmbiguousCommand(cmd, clist)
67
67
68 if choice:
68 if choice:
69 return choice.values()[0]
69 return choice.values()[0]
70
70
71 raise error.UnknownCommand(cmd)
71 raise error.UnknownCommand(cmd)
72
72
73 def findrepo(p):
73 def findrepo(p):
74 while not os.path.isdir(os.path.join(p, ".hg")):
74 while not os.path.isdir(os.path.join(p, ".hg")):
75 oldp, p = p, os.path.dirname(p)
75 oldp, p = p, os.path.dirname(p)
76 if p == oldp:
76 if p == oldp:
77 return None
77 return None
78
78
79 return p
79 return p
80
80
81 def bail_if_changed(repo):
81 def bail_if_changed(repo):
82 if repo.dirstate.p2() != nullid:
82 if repo.dirstate.p2() != nullid:
83 raise util.Abort(_('outstanding uncommitted merge'))
83 raise util.Abort(_('outstanding uncommitted merge'))
84 modified, added, removed, deleted = repo.status()[:4]
84 modified, added, removed, deleted = repo.status()[:4]
85 if modified or added or removed or deleted:
85 if modified or added or removed or deleted:
86 raise util.Abort(_("outstanding uncommitted changes"))
86 raise util.Abort(_("outstanding uncommitted changes"))
87
87
88 def logmessage(opts):
88 def logmessage(opts):
89 """ get the log message according to -m and -l option """
89 """ get the log message according to -m and -l option """
90 message = opts.get('message')
90 message = opts.get('message')
91 logfile = opts.get('logfile')
91 logfile = opts.get('logfile')
92
92
93 if message and logfile:
93 if message and logfile:
94 raise util.Abort(_('options --message and --logfile are mutually '
94 raise util.Abort(_('options --message and --logfile are mutually '
95 'exclusive'))
95 'exclusive'))
96 if not message and logfile:
96 if not message and logfile:
97 try:
97 try:
98 if logfile == '-':
98 if logfile == '-':
99 message = sys.stdin.read()
99 message = sys.stdin.read()
100 else:
100 else:
101 message = '\n'.join(util.readfile(logfile).splitlines())
101 message = '\n'.join(util.readfile(logfile).splitlines())
102 except IOError, inst:
102 except IOError, inst:
103 raise util.Abort(_("can't read commit message '%s': %s") %
103 raise util.Abort(_("can't read commit message '%s': %s") %
104 (logfile, inst.strerror))
104 (logfile, inst.strerror))
105 return message
105 return message
106
106
107 def loglimit(opts):
107 def loglimit(opts):
108 """get the log limit according to option -l/--limit"""
108 """get the log limit according to option -l/--limit"""
109 limit = opts.get('limit')
109 limit = opts.get('limit')
110 if limit:
110 if limit:
111 try:
111 try:
112 limit = int(limit)
112 limit = int(limit)
113 except ValueError:
113 except ValueError:
114 raise util.Abort(_('limit must be a positive integer'))
114 raise util.Abort(_('limit must be a positive integer'))
115 if limit <= 0:
115 if limit <= 0:
116 raise util.Abort(_('limit must be positive'))
116 raise util.Abort(_('limit must be positive'))
117 else:
117 else:
118 limit = None
118 limit = None
119 return limit
119 return limit
120
120
121 def revsingle(repo, revspec, default='.'):
121 def revsingle(repo, revspec, default='.'):
122 if not revspec:
122 if not revspec:
123 return repo[default]
123 return repo[default]
124
124
125 l = revrange(repo, [revspec])
125 l = revrange(repo, [revspec])
126 if len(l) < 1:
126 if len(l) < 1:
127 raise util.Abort(_('empty revision set'))
127 raise util.Abort(_('empty revision set'))
128 return repo[l[-1]]
128 return repo[l[-1]]
129
129
130 def revpair(repo, revs):
130 def revpair(repo, revs):
131 if not revs:
131 if not revs:
132 return repo.dirstate.p1(), None
132 return repo.dirstate.p1(), None
133
133
134 l = revrange(repo, revs)
134 l = revrange(repo, revs)
135
135
136 if len(l) == 0:
136 if len(l) == 0:
137 return repo.dirstate.p1(), None
137 return repo.dirstate.p1(), None
138
138
139 if len(l) == 1:
139 if len(l) == 1:
140 return repo.lookup(l[0]), None
140 return repo.lookup(l[0]), None
141
141
142 return repo.lookup(l[0]), repo.lookup(l[-1])
142 return repo.lookup(l[0]), repo.lookup(l[-1])
143
143
144 def revrange(repo, revs):
144 def revrange(repo, revs):
145 """Yield revision as strings from a list of revision specifications."""
145 """Yield revision as strings from a list of revision specifications."""
146
146
147 def revfix(repo, val, defval):
147 def revfix(repo, val, defval):
148 if not val and val != 0 and defval is not None:
148 if not val and val != 0 and defval is not None:
149 return defval
149 return defval
150 return repo.changelog.rev(repo.lookup(val))
150 return repo.changelog.rev(repo.lookup(val))
151
151
152 seen, l = set(), []
152 seen, l = set(), []
153 for spec in revs:
153 for spec in revs:
154 # attempt to parse old-style ranges first to deal with
154 # attempt to parse old-style ranges first to deal with
155 # things like old-tag which contain query metacharacters
155 # things like old-tag which contain query metacharacters
156 try:
156 try:
157 if isinstance(spec, int):
157 if isinstance(spec, int):
158 seen.add(spec)
158 seen.add(spec)
159 l.append(spec)
159 l.append(spec)
160 continue
160 continue
161
161
162 if revrangesep in spec:
162 if revrangesep in spec:
163 start, end = spec.split(revrangesep, 1)
163 start, end = spec.split(revrangesep, 1)
164 start = revfix(repo, start, 0)
164 start = revfix(repo, start, 0)
165 end = revfix(repo, end, len(repo) - 1)
165 end = revfix(repo, end, len(repo) - 1)
166 step = start > end and -1 or 1
166 step = start > end and -1 or 1
167 for rev in xrange(start, end + step, step):
167 for rev in xrange(start, end + step, step):
168 if rev in seen:
168 if rev in seen:
169 continue
169 continue
170 seen.add(rev)
170 seen.add(rev)
171 l.append(rev)
171 l.append(rev)
172 continue
172 continue
173 elif spec and spec in repo: # single unquoted rev
173 elif spec and spec in repo: # single unquoted rev
174 rev = revfix(repo, spec, None)
174 rev = revfix(repo, spec, None)
175 if rev in seen:
175 if rev in seen:
176 continue
176 continue
177 seen.add(rev)
177 seen.add(rev)
178 l.append(rev)
178 l.append(rev)
179 continue
179 continue
180 except error.RepoLookupError:
180 except error.RepoLookupError:
181 pass
181 pass
182
182
183 # fall through to new-style queries if old-style fails
183 # fall through to new-style queries if old-style fails
184 m = revset.match(repo.ui, spec)
184 m = revset.match(repo.ui, spec)
185 for r in m(repo, range(len(repo))):
185 for r in m(repo, range(len(repo))):
186 if r not in seen:
186 if r not in seen:
187 l.append(r)
187 l.append(r)
188 seen.update(l)
188 seen.update(l)
189
189
190 return l
190 return l
191
191
192 def make_filename(repo, pat, node,
192 def make_filename(repo, pat, node,
193 total=None, seqno=None, revwidth=None, pathname=None):
193 total=None, seqno=None, revwidth=None, pathname=None):
194 node_expander = {
194 node_expander = {
195 'H': lambda: hex(node),
195 'H': lambda: hex(node),
196 'R': lambda: str(repo.changelog.rev(node)),
196 'R': lambda: str(repo.changelog.rev(node)),
197 'h': lambda: short(node),
197 'h': lambda: short(node),
198 }
198 }
199 expander = {
199 expander = {
200 '%': lambda: '%',
200 '%': lambda: '%',
201 'b': lambda: os.path.basename(repo.root),
201 'b': lambda: os.path.basename(repo.root),
202 }
202 }
203
203
204 try:
204 try:
205 if node:
205 if node:
206 expander.update(node_expander)
206 expander.update(node_expander)
207 if node:
207 if node:
208 expander['r'] = (lambda:
208 expander['r'] = (lambda:
209 str(repo.changelog.rev(node)).zfill(revwidth or 0))
209 str(repo.changelog.rev(node)).zfill(revwidth or 0))
210 if total is not None:
210 if total is not None:
211 expander['N'] = lambda: str(total)
211 expander['N'] = lambda: str(total)
212 if seqno is not None:
212 if seqno is not None:
213 expander['n'] = lambda: str(seqno)
213 expander['n'] = lambda: str(seqno)
214 if total is not None and seqno is not None:
214 if total is not None and seqno is not None:
215 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
215 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
216 if pathname is not None:
216 if pathname is not None:
217 expander['s'] = lambda: os.path.basename(pathname)
217 expander['s'] = lambda: os.path.basename(pathname)
218 expander['d'] = lambda: os.path.dirname(pathname) or '.'
218 expander['d'] = lambda: os.path.dirname(pathname) or '.'
219 expander['p'] = lambda: pathname
219 expander['p'] = lambda: pathname
220
220
221 newname = []
221 newname = []
222 patlen = len(pat)
222 patlen = len(pat)
223 i = 0
223 i = 0
224 while i < patlen:
224 while i < patlen:
225 c = pat[i]
225 c = pat[i]
226 if c == '%':
226 if c == '%':
227 i += 1
227 i += 1
228 c = pat[i]
228 c = pat[i]
229 c = expander[c]()
229 c = expander[c]()
230 newname.append(c)
230 newname.append(c)
231 i += 1
231 i += 1
232 return ''.join(newname)
232 return ''.join(newname)
233 except KeyError, inst:
233 except KeyError, inst:
234 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
234 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
235 inst.args[0])
235 inst.args[0])
236
236
237 def make_file(repo, pat, node=None,
237 def make_file(repo, pat, node=None,
238 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
238 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
239
239
240 writable = mode not in ('r', 'rb')
240 writable = mode not in ('r', 'rb')
241
241
242 if not pat or pat == '-':
242 if not pat or pat == '-':
243 fp = writable and sys.stdout or sys.stdin
243 fp = writable and sys.stdout or sys.stdin
244 return os.fdopen(os.dup(fp.fileno()), mode)
244 return os.fdopen(os.dup(fp.fileno()), mode)
245 if hasattr(pat, 'write') and writable:
245 if hasattr(pat, 'write') and writable:
246 return pat
246 return pat
247 if hasattr(pat, 'read') and 'r' in mode:
247 if hasattr(pat, 'read') and 'r' in mode:
248 return pat
248 return pat
249 return open(make_filename(repo, pat, node, total, seqno, revwidth,
249 return open(make_filename(repo, pat, node, total, seqno, revwidth,
250 pathname),
250 pathname),
251 mode)
251 mode)
252
252
253 def copy(ui, repo, pats, opts, rename=False):
253 def copy(ui, repo, pats, opts, rename=False):
254 # called with the repo lock held
254 # called with the repo lock held
255 #
255 #
256 # hgsep => pathname that uses "/" to separate directories
256 # hgsep => pathname that uses "/" to separate directories
257 # ossep => pathname that uses os.sep to separate directories
257 # ossep => pathname that uses os.sep to separate directories
258 cwd = repo.getcwd()
258 cwd = repo.getcwd()
259 targets = {}
259 targets = {}
260 after = opts.get("after")
260 after = opts.get("after")
261 dryrun = opts.get("dry_run")
261 dryrun = opts.get("dry_run")
262 wctx = repo[None]
262 wctx = repo[None]
263
263
264 def walkpat(pat):
264 def walkpat(pat):
265 srcs = []
265 srcs = []
266 badstates = after and '?' or '?r'
266 badstates = after and '?' or '?r'
267 m = match(repo, [pat], opts, globbed=True)
267 m = match(repo, [pat], opts, globbed=True)
268 for abs in repo.walk(m):
268 for abs in repo.walk(m):
269 state = repo.dirstate[abs]
269 state = repo.dirstate[abs]
270 rel = m.rel(abs)
270 rel = m.rel(abs)
271 exact = m.exact(abs)
271 exact = m.exact(abs)
272 if state in badstates:
272 if state in badstates:
273 if exact and state == '?':
273 if exact and state == '?':
274 ui.warn(_('%s: not copying - file is not managed\n') % rel)
274 ui.warn(_('%s: not copying - file is not managed\n') % rel)
275 if exact and state == 'r':
275 if exact and state == 'r':
276 ui.warn(_('%s: not copying - file has been marked for'
276 ui.warn(_('%s: not copying - file has been marked for'
277 ' remove\n') % rel)
277 ' remove\n') % rel)
278 continue
278 continue
279 # abs: hgsep
279 # abs: hgsep
280 # rel: ossep
280 # rel: ossep
281 srcs.append((abs, rel, exact))
281 srcs.append((abs, rel, exact))
282 return srcs
282 return srcs
283
283
284 # abssrc: hgsep
284 # abssrc: hgsep
285 # relsrc: ossep
285 # relsrc: ossep
286 # otarget: ossep
286 # otarget: ossep
287 def copyfile(abssrc, relsrc, otarget, exact):
287 def copyfile(abssrc, relsrc, otarget, exact):
288 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
288 abstarget = scmutil.canonpath(repo.root, cwd, otarget)
289 reltarget = repo.pathto(abstarget, cwd)
289 reltarget = repo.pathto(abstarget, cwd)
290 target = repo.wjoin(abstarget)
290 target = repo.wjoin(abstarget)
291 src = repo.wjoin(abssrc)
291 src = repo.wjoin(abssrc)
292 state = repo.dirstate[abstarget]
292 state = repo.dirstate[abstarget]
293
293
294 scmutil.checkportable(ui, abstarget)
294 scmutil.checkportable(ui, abstarget)
295
295
296 # check for collisions
296 # check for collisions
297 prevsrc = targets.get(abstarget)
297 prevsrc = targets.get(abstarget)
298 if prevsrc is not None:
298 if prevsrc is not None:
299 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
299 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
300 (reltarget, repo.pathto(abssrc, cwd),
300 (reltarget, repo.pathto(abssrc, cwd),
301 repo.pathto(prevsrc, cwd)))
301 repo.pathto(prevsrc, cwd)))
302 return
302 return
303
303
304 # check for overwrites
304 # check for overwrites
305 exists = os.path.lexists(target)
305 exists = os.path.lexists(target)
306 if not after and exists or after and state in 'mn':
306 if not after and exists or after and state in 'mn':
307 if not opts['force']:
307 if not opts['force']:
308 ui.warn(_('%s: not overwriting - file exists\n') %
308 ui.warn(_('%s: not overwriting - file exists\n') %
309 reltarget)
309 reltarget)
310 return
310 return
311
311
312 if after:
312 if after:
313 if not exists:
313 if not exists:
314 if rename:
314 if rename:
315 ui.warn(_('%s: not recording move - %s does not exist\n') %
315 ui.warn(_('%s: not recording move - %s does not exist\n') %
316 (relsrc, reltarget))
316 (relsrc, reltarget))
317 else:
317 else:
318 ui.warn(_('%s: not recording copy - %s does not exist\n') %
318 ui.warn(_('%s: not recording copy - %s does not exist\n') %
319 (relsrc, reltarget))
319 (relsrc, reltarget))
320 return
320 return
321 elif not dryrun:
321 elif not dryrun:
322 try:
322 try:
323 if exists:
323 if exists:
324 os.unlink(target)
324 os.unlink(target)
325 targetdir = os.path.dirname(target) or '.'
325 targetdir = os.path.dirname(target) or '.'
326 if not os.path.isdir(targetdir):
326 if not os.path.isdir(targetdir):
327 os.makedirs(targetdir)
327 os.makedirs(targetdir)
328 util.copyfile(src, target)
328 util.copyfile(src, target)
329 except IOError, inst:
329 except IOError, inst:
330 if inst.errno == errno.ENOENT:
330 if inst.errno == errno.ENOENT:
331 ui.warn(_('%s: deleted in working copy\n') % relsrc)
331 ui.warn(_('%s: deleted in working copy\n') % relsrc)
332 else:
332 else:
333 ui.warn(_('%s: cannot copy - %s\n') %
333 ui.warn(_('%s: cannot copy - %s\n') %
334 (relsrc, inst.strerror))
334 (relsrc, inst.strerror))
335 return True # report a failure
335 return True # report a failure
336
336
337 if ui.verbose or not exact:
337 if ui.verbose or not exact:
338 if rename:
338 if rename:
339 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
339 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
340 else:
340 else:
341 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
341 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
342
342
343 targets[abstarget] = abssrc
343 targets[abstarget] = abssrc
344
344
345 # fix up dirstate
345 # fix up dirstate
346 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
346 dirstatecopy(ui, repo, wctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd)
347 if rename and not dryrun:
347 if rename and not dryrun:
348 wctx.remove([abssrc], not after)
348 wctx.remove([abssrc], not after)
349
349
350 # pat: ossep
350 # pat: ossep
351 # dest ossep
351 # dest ossep
352 # srcs: list of (hgsep, hgsep, ossep, bool)
352 # srcs: list of (hgsep, hgsep, ossep, bool)
353 # return: function that takes hgsep and returns ossep
353 # return: function that takes hgsep and returns ossep
354 def targetpathfn(pat, dest, srcs):
354 def targetpathfn(pat, dest, srcs):
355 if os.path.isdir(pat):
355 if os.path.isdir(pat):
356 abspfx = scmutil.canonpath(repo.root, cwd, pat)
356 abspfx = scmutil.canonpath(repo.root, cwd, pat)
357 abspfx = util.localpath(abspfx)
357 abspfx = util.localpath(abspfx)
358 if destdirexists:
358 if destdirexists:
359 striplen = len(os.path.split(abspfx)[0])
359 striplen = len(os.path.split(abspfx)[0])
360 else:
360 else:
361 striplen = len(abspfx)
361 striplen = len(abspfx)
362 if striplen:
362 if striplen:
363 striplen += len(os.sep)
363 striplen += len(os.sep)
364 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
364 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
365 elif destdirexists:
365 elif destdirexists:
366 res = lambda p: os.path.join(dest,
366 res = lambda p: os.path.join(dest,
367 os.path.basename(util.localpath(p)))
367 os.path.basename(util.localpath(p)))
368 else:
368 else:
369 res = lambda p: dest
369 res = lambda p: dest
370 return res
370 return res
371
371
372 # pat: ossep
372 # pat: ossep
373 # dest ossep
373 # dest ossep
374 # srcs: list of (hgsep, hgsep, ossep, bool)
374 # srcs: list of (hgsep, hgsep, ossep, bool)
375 # return: function that takes hgsep and returns ossep
375 # return: function that takes hgsep and returns ossep
376 def targetpathafterfn(pat, dest, srcs):
376 def targetpathafterfn(pat, dest, srcs):
377 if matchmod.patkind(pat):
377 if matchmod.patkind(pat):
378 # a mercurial pattern
378 # a mercurial pattern
379 res = lambda p: os.path.join(dest,
379 res = lambda p: os.path.join(dest,
380 os.path.basename(util.localpath(p)))
380 os.path.basename(util.localpath(p)))
381 else:
381 else:
382 abspfx = scmutil.canonpath(repo.root, cwd, pat)
382 abspfx = scmutil.canonpath(repo.root, cwd, pat)
383 if len(abspfx) < len(srcs[0][0]):
383 if len(abspfx) < len(srcs[0][0]):
384 # A directory. Either the target path contains the last
384 # A directory. Either the target path contains the last
385 # component of the source path or it does not.
385 # component of the source path or it does not.
386 def evalpath(striplen):
386 def evalpath(striplen):
387 score = 0
387 score = 0
388 for s in srcs:
388 for s in srcs:
389 t = os.path.join(dest, util.localpath(s[0])[striplen:])
389 t = os.path.join(dest, util.localpath(s[0])[striplen:])
390 if os.path.lexists(t):
390 if os.path.lexists(t):
391 score += 1
391 score += 1
392 return score
392 return score
393
393
394 abspfx = util.localpath(abspfx)
394 abspfx = util.localpath(abspfx)
395 striplen = len(abspfx)
395 striplen = len(abspfx)
396 if striplen:
396 if striplen:
397 striplen += len(os.sep)
397 striplen += len(os.sep)
398 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
398 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
399 score = evalpath(striplen)
399 score = evalpath(striplen)
400 striplen1 = len(os.path.split(abspfx)[0])
400 striplen1 = len(os.path.split(abspfx)[0])
401 if striplen1:
401 if striplen1:
402 striplen1 += len(os.sep)
402 striplen1 += len(os.sep)
403 if evalpath(striplen1) > score:
403 if evalpath(striplen1) > score:
404 striplen = striplen1
404 striplen = striplen1
405 res = lambda p: os.path.join(dest,
405 res = lambda p: os.path.join(dest,
406 util.localpath(p)[striplen:])
406 util.localpath(p)[striplen:])
407 else:
407 else:
408 # a file
408 # a file
409 if destdirexists:
409 if destdirexists:
410 res = lambda p: os.path.join(dest,
410 res = lambda p: os.path.join(dest,
411 os.path.basename(util.localpath(p)))
411 os.path.basename(util.localpath(p)))
412 else:
412 else:
413 res = lambda p: dest
413 res = lambda p: dest
414 return res
414 return res
415
415
416
416
417 pats = expandpats(pats)
417 pats = expandpats(pats)
418 if not pats:
418 if not pats:
419 raise util.Abort(_('no source or destination specified'))
419 raise util.Abort(_('no source or destination specified'))
420 if len(pats) == 1:
420 if len(pats) == 1:
421 raise util.Abort(_('no destination specified'))
421 raise util.Abort(_('no destination specified'))
422 dest = pats.pop()
422 dest = pats.pop()
423 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
423 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
424 if not destdirexists:
424 if not destdirexists:
425 if len(pats) > 1 or matchmod.patkind(pats[0]):
425 if len(pats) > 1 or matchmod.patkind(pats[0]):
426 raise util.Abort(_('with multiple sources, destination must be an '
426 raise util.Abort(_('with multiple sources, destination must be an '
427 'existing directory'))
427 'existing directory'))
428 if util.endswithsep(dest):
428 if util.endswithsep(dest):
429 raise util.Abort(_('destination %s is not a directory') % dest)
429 raise util.Abort(_('destination %s is not a directory') % dest)
430
430
431 tfn = targetpathfn
431 tfn = targetpathfn
432 if after:
432 if after:
433 tfn = targetpathafterfn
433 tfn = targetpathafterfn
434 copylist = []
434 copylist = []
435 for pat in pats:
435 for pat in pats:
436 srcs = walkpat(pat)
436 srcs = walkpat(pat)
437 if not srcs:
437 if not srcs:
438 continue
438 continue
439 copylist.append((tfn(pat, dest, srcs), srcs))
439 copylist.append((tfn(pat, dest, srcs), srcs))
440 if not copylist:
440 if not copylist:
441 raise util.Abort(_('no files to copy'))
441 raise util.Abort(_('no files to copy'))
442
442
443 errors = 0
443 errors = 0
444 for targetpath, srcs in copylist:
444 for targetpath, srcs in copylist:
445 for abssrc, relsrc, exact in srcs:
445 for abssrc, relsrc, exact in srcs:
446 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
446 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
447 errors += 1
447 errors += 1
448
448
449 if errors:
449 if errors:
450 ui.warn(_('(consider using --after)\n'))
450 ui.warn(_('(consider using --after)\n'))
451
451
452 return errors != 0
452 return errors != 0
453
453
454 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
454 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
455 runargs=None, appendpid=False):
455 runargs=None, appendpid=False):
456 '''Run a command as a service.'''
456 '''Run a command as a service.'''
457
457
458 if opts['daemon'] and not opts['daemon_pipefds']:
458 if opts['daemon'] and not opts['daemon_pipefds']:
459 # Signal child process startup with file removal
459 # Signal child process startup with file removal
460 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
460 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
461 os.close(lockfd)
461 os.close(lockfd)
462 try:
462 try:
463 if not runargs:
463 if not runargs:
464 runargs = util.hgcmd() + sys.argv[1:]
464 runargs = util.hgcmd() + sys.argv[1:]
465 runargs.append('--daemon-pipefds=%s' % lockpath)
465 runargs.append('--daemon-pipefds=%s' % lockpath)
466 # Don't pass --cwd to the child process, because we've already
466 # Don't pass --cwd to the child process, because we've already
467 # changed directory.
467 # changed directory.
468 for i in xrange(1, len(runargs)):
468 for i in xrange(1, len(runargs)):
469 if runargs[i].startswith('--cwd='):
469 if runargs[i].startswith('--cwd='):
470 del runargs[i]
470 del runargs[i]
471 break
471 break
472 elif runargs[i].startswith('--cwd'):
472 elif runargs[i].startswith('--cwd'):
473 del runargs[i:i + 2]
473 del runargs[i:i + 2]
474 break
474 break
475 def condfn():
475 def condfn():
476 return not os.path.exists(lockpath)
476 return not os.path.exists(lockpath)
477 pid = util.rundetached(runargs, condfn)
477 pid = util.rundetached(runargs, condfn)
478 if pid < 0:
478 if pid < 0:
479 raise util.Abort(_('child process failed to start'))
479 raise util.Abort(_('child process failed to start'))
480 finally:
480 finally:
481 try:
481 try:
482 os.unlink(lockpath)
482 os.unlink(lockpath)
483 except OSError, e:
483 except OSError, e:
484 if e.errno != errno.ENOENT:
484 if e.errno != errno.ENOENT:
485 raise
485 raise
486 if parentfn:
486 if parentfn:
487 return parentfn(pid)
487 return parentfn(pid)
488 else:
488 else:
489 return
489 return
490
490
491 if initfn:
491 if initfn:
492 initfn()
492 initfn()
493
493
494 if opts['pid_file']:
494 if opts['pid_file']:
495 mode = appendpid and 'a' or 'w'
495 mode = appendpid and 'a' or 'w'
496 fp = open(opts['pid_file'], mode)
496 fp = open(opts['pid_file'], mode)
497 fp.write(str(os.getpid()) + '\n')
497 fp.write(str(os.getpid()) + '\n')
498 fp.close()
498 fp.close()
499
499
500 if opts['daemon_pipefds']:
500 if opts['daemon_pipefds']:
501 lockpath = opts['daemon_pipefds']
501 lockpath = opts['daemon_pipefds']
502 try:
502 try:
503 os.setsid()
503 os.setsid()
504 except AttributeError:
504 except AttributeError:
505 pass
505 pass
506 os.unlink(lockpath)
506 os.unlink(lockpath)
507 util.hidewindow()
507 util.hidewindow()
508 sys.stdout.flush()
508 sys.stdout.flush()
509 sys.stderr.flush()
509 sys.stderr.flush()
510
510
511 nullfd = os.open(util.nulldev, os.O_RDWR)
511 nullfd = os.open(util.nulldev, os.O_RDWR)
512 logfilefd = nullfd
512 logfilefd = nullfd
513 if logfile:
513 if logfile:
514 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
514 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
515 os.dup2(nullfd, 0)
515 os.dup2(nullfd, 0)
516 os.dup2(logfilefd, 1)
516 os.dup2(logfilefd, 1)
517 os.dup2(logfilefd, 2)
517 os.dup2(logfilefd, 2)
518 if nullfd not in (0, 1, 2):
518 if nullfd not in (0, 1, 2):
519 os.close(nullfd)
519 os.close(nullfd)
520 if logfile and logfilefd not in (0, 1, 2):
520 if logfile and logfilefd not in (0, 1, 2):
521 os.close(logfilefd)
521 os.close(logfilefd)
522
522
523 if runfn:
523 if runfn:
524 return runfn()
524 return runfn()
525
525
526 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
526 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
527 opts=None):
527 opts=None):
528 '''export changesets as hg patches.'''
528 '''export changesets as hg patches.'''
529
529
530 total = len(revs)
530 total = len(revs)
531 revwidth = max([len(str(rev)) for rev in revs])
531 revwidth = max([len(str(rev)) for rev in revs])
532
532
533 def single(rev, seqno, fp):
533 def single(rev, seqno, fp):
534 ctx = repo[rev]
534 ctx = repo[rev]
535 node = ctx.node()
535 node = ctx.node()
536 parents = [p.node() for p in ctx.parents() if p]
536 parents = [p.node() for p in ctx.parents() if p]
537 branch = ctx.branch()
537 branch = ctx.branch()
538 if switch_parent:
538 if switch_parent:
539 parents.reverse()
539 parents.reverse()
540 prev = (parents and parents[0]) or nullid
540 prev = (parents and parents[0]) or nullid
541
541
542 shouldclose = False
542 shouldclose = False
543 if not fp:
543 if not fp:
544 fp = make_file(repo, template, node, total=total, seqno=seqno,
544 fp = make_file(repo, template, node, total=total, seqno=seqno,
545 revwidth=revwidth, mode='ab')
545 revwidth=revwidth, mode='ab')
546 if fp != template:
546 if fp != template:
547 shouldclose = True
547 shouldclose = True
548 if fp != sys.stdout and hasattr(fp, 'name'):
548 if fp != sys.stdout and hasattr(fp, 'name'):
549 repo.ui.note("%s\n" % fp.name)
549 repo.ui.note("%s\n" % fp.name)
550
550
551 fp.write("# HG changeset patch\n")
551 fp.write("# HG changeset patch\n")
552 fp.write("# User %s\n" % ctx.user())
552 fp.write("# User %s\n" % ctx.user())
553 fp.write("# Date %d %d\n" % ctx.date())
553 fp.write("# Date %d %d\n" % ctx.date())
554 if branch and branch != 'default':
554 if branch and branch != 'default':
555 fp.write("# Branch %s\n" % branch)
555 fp.write("# Branch %s\n" % branch)
556 fp.write("# Node ID %s\n" % hex(node))
556 fp.write("# Node ID %s\n" % hex(node))
557 fp.write("# Parent %s\n" % hex(prev))
557 fp.write("# Parent %s\n" % hex(prev))
558 if len(parents) > 1:
558 if len(parents) > 1:
559 fp.write("# Parent %s\n" % hex(parents[1]))
559 fp.write("# Parent %s\n" % hex(parents[1]))
560 fp.write(ctx.description().rstrip())
560 fp.write(ctx.description().rstrip())
561 fp.write("\n\n")
561 fp.write("\n\n")
562
562
563 for chunk in patch.diff(repo, prev, node, opts=opts):
563 for chunk in patch.diff(repo, prev, node, opts=opts):
564 fp.write(chunk)
564 fp.write(chunk)
565
565
566 if shouldclose:
566 if shouldclose:
567 fp.close()
567 fp.close()
568
568
569 for seqno, rev in enumerate(revs):
569 for seqno, rev in enumerate(revs):
570 single(rev, seqno + 1, fp)
570 single(rev, seqno + 1, fp)
571
571
572 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
572 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
573 changes=None, stat=False, fp=None, prefix='',
573 changes=None, stat=False, fp=None, prefix='',
574 listsubrepos=False):
574 listsubrepos=False):
575 '''show diff or diffstat.'''
575 '''show diff or diffstat.'''
576 if fp is None:
576 if fp is None:
577 write = ui.write
577 write = ui.write
578 else:
578 else:
579 def write(s, **kw):
579 def write(s, **kw):
580 fp.write(s)
580 fp.write(s)
581
581
582 if stat:
582 if stat:
583 diffopts = diffopts.copy(context=0)
583 diffopts = diffopts.copy(context=0)
584 width = 80
584 width = 80
585 if not ui.plain():
585 if not ui.plain():
586 width = ui.termwidth()
586 width = ui.termwidth()
587 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
587 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
588 prefix=prefix)
588 prefix=prefix)
589 for chunk, label in patch.diffstatui(util.iterlines(chunks),
589 for chunk, label in patch.diffstatui(util.iterlines(chunks),
590 width=width,
590 width=width,
591 git=diffopts.git):
591 git=diffopts.git):
592 write(chunk, label=label)
592 write(chunk, label=label)
593 else:
593 else:
594 for chunk, label in patch.diffui(repo, node1, node2, match,
594 for chunk, label in patch.diffui(repo, node1, node2, match,
595 changes, diffopts, prefix=prefix):
595 changes, diffopts, prefix=prefix):
596 write(chunk, label=label)
596 write(chunk, label=label)
597
597
598 if listsubrepos:
598 if listsubrepos:
599 ctx1 = repo[node1]
599 ctx1 = repo[node1]
600 ctx2 = repo[node2]
600 ctx2 = repo[node2]
601 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
601 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
602 if node2 is not None:
602 if node2 is not None:
603 node2 = ctx2.substate[subpath][1]
603 node2 = ctx2.substate[subpath][1]
604 submatch = matchmod.narrowmatcher(subpath, match)
604 submatch = matchmod.narrowmatcher(subpath, match)
605 sub.diff(diffopts, node2, submatch, changes=changes,
605 sub.diff(diffopts, node2, submatch, changes=changes,
606 stat=stat, fp=fp, prefix=prefix)
606 stat=stat, fp=fp, prefix=prefix)
607
607
608 class changeset_printer(object):
608 class changeset_printer(object):
609 '''show changeset information when templating not requested.'''
609 '''show changeset information when templating not requested.'''
610
610
611 def __init__(self, ui, repo, patch, diffopts, buffered):
611 def __init__(self, ui, repo, patch, diffopts, buffered):
612 self.ui = ui
612 self.ui = ui
613 self.repo = repo
613 self.repo = repo
614 self.buffered = buffered
614 self.buffered = buffered
615 self.patch = patch
615 self.patch = patch
616 self.diffopts = diffopts
616 self.diffopts = diffopts
617 self.header = {}
617 self.header = {}
618 self.hunk = {}
618 self.hunk = {}
619 self.lastheader = None
619 self.lastheader = None
620 self.footer = None
620 self.footer = None
621
621
622 def flush(self, rev):
622 def flush(self, rev):
623 if rev in self.header:
623 if rev in self.header:
624 h = self.header[rev]
624 h = self.header[rev]
625 if h != self.lastheader:
625 if h != self.lastheader:
626 self.lastheader = h
626 self.lastheader = h
627 self.ui.write(h)
627 self.ui.write(h)
628 del self.header[rev]
628 del self.header[rev]
629 if rev in self.hunk:
629 if rev in self.hunk:
630 self.ui.write(self.hunk[rev])
630 self.ui.write(self.hunk[rev])
631 del self.hunk[rev]
631 del self.hunk[rev]
632 return 1
632 return 1
633 return 0
633 return 0
634
634
635 def close(self):
635 def close(self):
636 if self.footer:
636 if self.footer:
637 self.ui.write(self.footer)
637 self.ui.write(self.footer)
638
638
639 def show(self, ctx, copies=None, matchfn=None, **props):
639 def show(self, ctx, copies=None, matchfn=None, **props):
640 if self.buffered:
640 if self.buffered:
641 self.ui.pushbuffer()
641 self.ui.pushbuffer()
642 self._show(ctx, copies, matchfn, props)
642 self._show(ctx, copies, matchfn, props)
643 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
643 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
644 else:
644 else:
645 self._show(ctx, copies, matchfn, props)
645 self._show(ctx, copies, matchfn, props)
646
646
647 def _show(self, ctx, copies, matchfn, props):
647 def _show(self, ctx, copies, matchfn, props):
648 '''show a single changeset or file revision'''
648 '''show a single changeset or file revision'''
649 changenode = ctx.node()
649 changenode = ctx.node()
650 rev = ctx.rev()
650 rev = ctx.rev()
651
651
652 if self.ui.quiet:
652 if self.ui.quiet:
653 self.ui.write("%d:%s\n" % (rev, short(changenode)),
653 self.ui.write("%d:%s\n" % (rev, short(changenode)),
654 label='log.node')
654 label='log.node')
655 return
655 return
656
656
657 log = self.repo.changelog
657 log = self.repo.changelog
658 date = util.datestr(ctx.date())
658 date = util.datestr(ctx.date())
659
659
660 hexfunc = self.ui.debugflag and hex or short
660 hexfunc = self.ui.debugflag and hex or short
661
661
662 parents = [(p, hexfunc(log.node(p)))
662 parents = [(p, hexfunc(log.node(p)))
663 for p in self._meaningful_parentrevs(log, rev)]
663 for p in self._meaningful_parentrevs(log, rev)]
664
664
665 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
665 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
666 label='log.changeset')
666 label='log.changeset')
667
667
668 branch = ctx.branch()
668 branch = ctx.branch()
669 # don't show the default branch name
669 # don't show the default branch name
670 if branch != 'default':
670 if branch != 'default':
671 self.ui.write(_("branch: %s\n") % branch,
671 self.ui.write(_("branch: %s\n") % branch,
672 label='log.branch')
672 label='log.branch')
673 for bookmark in self.repo.nodebookmarks(changenode):
673 for bookmark in self.repo.nodebookmarks(changenode):
674 self.ui.write(_("bookmark: %s\n") % bookmark,
674 self.ui.write(_("bookmark: %s\n") % bookmark,
675 label='log.bookmark')
675 label='log.bookmark')
676 for tag in self.repo.nodetags(changenode):
676 for tag in self.repo.nodetags(changenode):
677 self.ui.write(_("tag: %s\n") % tag,
677 self.ui.write(_("tag: %s\n") % tag,
678 label='log.tag')
678 label='log.tag')
679 for parent in parents:
679 for parent in parents:
680 self.ui.write(_("parent: %d:%s\n") % parent,
680 self.ui.write(_("parent: %d:%s\n") % parent,
681 label='log.parent')
681 label='log.parent')
682
682
683 if self.ui.debugflag:
683 if self.ui.debugflag:
684 mnode = ctx.manifestnode()
684 mnode = ctx.manifestnode()
685 self.ui.write(_("manifest: %d:%s\n") %
685 self.ui.write(_("manifest: %d:%s\n") %
686 (self.repo.manifest.rev(mnode), hex(mnode)),
686 (self.repo.manifest.rev(mnode), hex(mnode)),
687 label='ui.debug log.manifest')
687 label='ui.debug log.manifest')
688 self.ui.write(_("user: %s\n") % ctx.user(),
688 self.ui.write(_("user: %s\n") % ctx.user(),
689 label='log.user')
689 label='log.user')
690 self.ui.write(_("date: %s\n") % date,
690 self.ui.write(_("date: %s\n") % date,
691 label='log.date')
691 label='log.date')
692
692
693 if self.ui.debugflag:
693 if self.ui.debugflag:
694 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
694 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
695 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
695 for key, value in zip([_("files:"), _("files+:"), _("files-:")],
696 files):
696 files):
697 if value:
697 if value:
698 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
698 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
699 label='ui.debug log.files')
699 label='ui.debug log.files')
700 elif ctx.files() and self.ui.verbose:
700 elif ctx.files() and self.ui.verbose:
701 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
701 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
702 label='ui.note log.files')
702 label='ui.note log.files')
703 if copies and self.ui.verbose:
703 if copies and self.ui.verbose:
704 copies = ['%s (%s)' % c for c in copies]
704 copies = ['%s (%s)' % c for c in copies]
705 self.ui.write(_("copies: %s\n") % ' '.join(copies),
705 self.ui.write(_("copies: %s\n") % ' '.join(copies),
706 label='ui.note log.copies')
706 label='ui.note log.copies')
707
707
708 extra = ctx.extra()
708 extra = ctx.extra()
709 if extra and self.ui.debugflag:
709 if extra and self.ui.debugflag:
710 for key, value in sorted(extra.items()):
710 for key, value in sorted(extra.items()):
711 self.ui.write(_("extra: %s=%s\n")
711 self.ui.write(_("extra: %s=%s\n")
712 % (key, value.encode('string_escape')),
712 % (key, value.encode('string_escape')),
713 label='ui.debug log.extra')
713 label='ui.debug log.extra')
714
714
715 description = ctx.description().strip()
715 description = ctx.description().strip()
716 if description:
716 if description:
717 if self.ui.verbose:
717 if self.ui.verbose:
718 self.ui.write(_("description:\n"),
718 self.ui.write(_("description:\n"),
719 label='ui.note log.description')
719 label='ui.note log.description')
720 self.ui.write(description,
720 self.ui.write(description,
721 label='ui.note log.description')
721 label='ui.note log.description')
722 self.ui.write("\n\n")
722 self.ui.write("\n\n")
723 else:
723 else:
724 self.ui.write(_("summary: %s\n") %
724 self.ui.write(_("summary: %s\n") %
725 description.splitlines()[0],
725 description.splitlines()[0],
726 label='log.summary')
726 label='log.summary')
727 self.ui.write("\n")
727 self.ui.write("\n")
728
728
729 self.showpatch(changenode, matchfn)
729 self.showpatch(changenode, matchfn)
730
730
731 def showpatch(self, node, matchfn):
731 def showpatch(self, node, matchfn):
732 if not matchfn:
732 if not matchfn:
733 matchfn = self.patch
733 matchfn = self.patch
734 if matchfn:
734 if matchfn:
735 stat = self.diffopts.get('stat')
735 stat = self.diffopts.get('stat')
736 diff = self.diffopts.get('patch')
736 diff = self.diffopts.get('patch')
737 diffopts = patch.diffopts(self.ui, self.diffopts)
737 diffopts = patch.diffopts(self.ui, self.diffopts)
738 prev = self.repo.changelog.parents(node)[0]
738 prev = self.repo.changelog.parents(node)[0]
739 if stat:
739 if stat:
740 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
740 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
741 match=matchfn, stat=True)
741 match=matchfn, stat=True)
742 if diff:
742 if diff:
743 if stat:
743 if stat:
744 self.ui.write("\n")
744 self.ui.write("\n")
745 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
745 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
746 match=matchfn, stat=False)
746 match=matchfn, stat=False)
747 self.ui.write("\n")
747 self.ui.write("\n")
748
748
749 def _meaningful_parentrevs(self, log, rev):
749 def _meaningful_parentrevs(self, log, rev):
750 """Return list of meaningful (or all if debug) parentrevs for rev.
750 """Return list of meaningful (or all if debug) parentrevs for rev.
751
751
752 For merges (two non-nullrev revisions) both parents are meaningful.
752 For merges (two non-nullrev revisions) both parents are meaningful.
753 Otherwise the first parent revision is considered meaningful if it
753 Otherwise the first parent revision is considered meaningful if it
754 is not the preceding revision.
754 is not the preceding revision.
755 """
755 """
756 parents = log.parentrevs(rev)
756 parents = log.parentrevs(rev)
757 if not self.ui.debugflag and parents[1] == nullrev:
757 if not self.ui.debugflag and parents[1] == nullrev:
758 if parents[0] >= rev - 1:
758 if parents[0] >= rev - 1:
759 parents = []
759 parents = []
760 else:
760 else:
761 parents = [parents[0]]
761 parents = [parents[0]]
762 return parents
762 return parents
763
763
764
764
765 class changeset_templater(changeset_printer):
765 class changeset_templater(changeset_printer):
766 '''format changeset information.'''
766 '''format changeset information.'''
767
767
768 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
768 def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
769 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
769 changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
770 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
770 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
771 defaulttempl = {
771 defaulttempl = {
772 'parent': '{rev}:{node|formatnode} ',
772 'parent': '{rev}:{node|formatnode} ',
773 'manifest': '{rev}:{node|formatnode}',
773 'manifest': '{rev}:{node|formatnode}',
774 'file_copy': '{name} ({source})',
774 'file_copy': '{name} ({source})',
775 'extra': '{key}={value|stringescape}'
775 'extra': '{key}={value|stringescape}'
776 }
776 }
777 # filecopy is preserved for compatibility reasons
777 # filecopy is preserved for compatibility reasons
778 defaulttempl['filecopy'] = defaulttempl['file_copy']
778 defaulttempl['filecopy'] = defaulttempl['file_copy']
779 self.t = templater.templater(mapfile, {'formatnode': formatnode},
779 self.t = templater.templater(mapfile, {'formatnode': formatnode},
780 cache=defaulttempl)
780 cache=defaulttempl)
781 self.cache = {}
781 self.cache = {}
782
782
783 def use_template(self, t):
783 def use_template(self, t):
784 '''set template string to use'''
784 '''set template string to use'''
785 self.t.cache['changeset'] = t
785 self.t.cache['changeset'] = t
786
786
787 def _meaningful_parentrevs(self, ctx):
787 def _meaningful_parentrevs(self, ctx):
788 """Return list of meaningful (or all if debug) parentrevs for rev.
788 """Return list of meaningful (or all if debug) parentrevs for rev.
789 """
789 """
790 parents = ctx.parents()
790 parents = ctx.parents()
791 if len(parents) > 1:
791 if len(parents) > 1:
792 return parents
792 return parents
793 if self.ui.debugflag:
793 if self.ui.debugflag:
794 return [parents[0], self.repo['null']]
794 return [parents[0], self.repo['null']]
795 if parents[0].rev() >= ctx.rev() - 1:
795 if parents[0].rev() >= ctx.rev() - 1:
796 return []
796 return []
797 return parents
797 return parents
798
798
799 def _show(self, ctx, copies, matchfn, props):
799 def _show(self, ctx, copies, matchfn, props):
800 '''show a single changeset or file revision'''
800 '''show a single changeset or file revision'''
801
801
802 showlist = templatekw.showlist
802 showlist = templatekw.showlist
803
803
804 # showparents() behaviour depends on ui trace level which
804 # showparents() behaviour depends on ui trace level which
805 # causes unexpected behaviours at templating level and makes
805 # causes unexpected behaviours at templating level and makes
806 # it harder to extract it in a standalone function. Its
806 # it harder to extract it in a standalone function. Its
807 # behaviour cannot be changed so leave it here for now.
807 # behaviour cannot be changed so leave it here for now.
808 def showparents(**args):
808 def showparents(**args):
809 ctx = args['ctx']
809 ctx = args['ctx']
810 parents = [[('rev', p.rev()), ('node', p.hex())]
810 parents = [[('rev', p.rev()), ('node', p.hex())]
811 for p in self._meaningful_parentrevs(ctx)]
811 for p in self._meaningful_parentrevs(ctx)]
812 return showlist('parent', parents, **args)
812 return showlist('parent', parents, **args)
813
813
814 props = props.copy()
814 props = props.copy()
815 props.update(templatekw.keywords)
815 props.update(templatekw.keywords)
816 props['parents'] = showparents
816 props['parents'] = showparents
817 props['templ'] = self.t
817 props['templ'] = self.t
818 props['ctx'] = ctx
818 props['ctx'] = ctx
819 props['repo'] = self.repo
819 props['repo'] = self.repo
820 props['revcache'] = {'copies': copies}
820 props['revcache'] = {'copies': copies}
821 props['cache'] = self.cache
821 props['cache'] = self.cache
822
822
823 # find correct templates for current mode
823 # find correct templates for current mode
824
824
825 tmplmodes = [
825 tmplmodes = [
826 (True, None),
826 (True, None),
827 (self.ui.verbose, 'verbose'),
827 (self.ui.verbose, 'verbose'),
828 (self.ui.quiet, 'quiet'),
828 (self.ui.quiet, 'quiet'),
829 (self.ui.debugflag, 'debug'),
829 (self.ui.debugflag, 'debug'),
830 ]
830 ]
831
831
832 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
832 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
833 for mode, postfix in tmplmodes:
833 for mode, postfix in tmplmodes:
834 for type in types:
834 for type in types:
835 cur = postfix and ('%s_%s' % (type, postfix)) or type
835 cur = postfix and ('%s_%s' % (type, postfix)) or type
836 if mode and cur in self.t:
836 if mode and cur in self.t:
837 types[type] = cur
837 types[type] = cur
838
838
839 try:
839 try:
840
840
841 # write header
841 # write header
842 if types['header']:
842 if types['header']:
843 h = templater.stringify(self.t(types['header'], **props))
843 h = templater.stringify(self.t(types['header'], **props))
844 if self.buffered:
844 if self.buffered:
845 self.header[ctx.rev()] = h
845 self.header[ctx.rev()] = h
846 else:
846 else:
847 if self.lastheader != h:
847 if self.lastheader != h:
848 self.lastheader = h
848 self.lastheader = h
849 self.ui.write(h)
849 self.ui.write(h)
850
850
851 # write changeset metadata, then patch if requested
851 # write changeset metadata, then patch if requested
852 key = types['changeset']
852 key = types['changeset']
853 self.ui.write(templater.stringify(self.t(key, **props)))
853 self.ui.write(templater.stringify(self.t(key, **props)))
854 self.showpatch(ctx.node(), matchfn)
854 self.showpatch(ctx.node(), matchfn)
855
855
856 if types['footer']:
856 if types['footer']:
857 if not self.footer:
857 if not self.footer:
858 self.footer = templater.stringify(self.t(types['footer'],
858 self.footer = templater.stringify(self.t(types['footer'],
859 **props))
859 **props))
860
860
861 except KeyError, inst:
861 except KeyError, inst:
862 msg = _("%s: no key named '%s'")
862 msg = _("%s: no key named '%s'")
863 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
863 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
864 except SyntaxError, inst:
864 except SyntaxError, inst:
865 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
865 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
866
866
867 def show_changeset(ui, repo, opts, buffered=False):
867 def show_changeset(ui, repo, opts, buffered=False):
868 """show one changeset using template or regular display.
868 """show one changeset using template or regular display.
869
869
870 Display format will be the first non-empty hit of:
870 Display format will be the first non-empty hit of:
871 1. option 'template'
871 1. option 'template'
872 2. option 'style'
872 2. option 'style'
873 3. [ui] setting 'logtemplate'
873 3. [ui] setting 'logtemplate'
874 4. [ui] setting 'style'
874 4. [ui] setting 'style'
875 If all of these values are either the unset or the empty string,
875 If all of these values are either the unset or the empty string,
876 regular display via changeset_printer() is done.
876 regular display via changeset_printer() is done.
877 """
877 """
878 # options
878 # options
879 patch = False
879 patch = False
880 if opts.get('patch') or opts.get('stat'):
880 if opts.get('patch') or opts.get('stat'):
881 patch = matchall(repo)
881 patch = matchall(repo)
882
882
883 tmpl = opts.get('template')
883 tmpl = opts.get('template')
884 style = None
884 style = None
885 if tmpl:
885 if tmpl:
886 tmpl = templater.parsestring(tmpl, quoted=False)
886 tmpl = templater.parsestring(tmpl, quoted=False)
887 else:
887 else:
888 style = opts.get('style')
888 style = opts.get('style')
889
889
890 # ui settings
890 # ui settings
891 if not (tmpl or style):
891 if not (tmpl or style):
892 tmpl = ui.config('ui', 'logtemplate')
892 tmpl = ui.config('ui', 'logtemplate')
893 if tmpl:
893 if tmpl:
894 tmpl = templater.parsestring(tmpl)
894 tmpl = templater.parsestring(tmpl)
895 else:
895 else:
896 style = util.expandpath(ui.config('ui', 'style', ''))
896 style = util.expandpath(ui.config('ui', 'style', ''))
897
897
898 if not (tmpl or style):
898 if not (tmpl or style):
899 return changeset_printer(ui, repo, patch, opts, buffered)
899 return changeset_printer(ui, repo, patch, opts, buffered)
900
900
901 mapfile = None
901 mapfile = None
902 if style and not tmpl:
902 if style and not tmpl:
903 mapfile = style
903 mapfile = style
904 if not os.path.split(mapfile)[0]:
904 if not os.path.split(mapfile)[0]:
905 mapname = (templater.templatepath('map-cmdline.' + mapfile)
905 mapname = (templater.templatepath('map-cmdline.' + mapfile)
906 or templater.templatepath(mapfile))
906 or templater.templatepath(mapfile))
907 if mapname:
907 if mapname:
908 mapfile = mapname
908 mapfile = mapname
909
909
910 try:
910 try:
911 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
911 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
912 except SyntaxError, inst:
912 except SyntaxError, inst:
913 raise util.Abort(inst.args[0])
913 raise util.Abort(inst.args[0])
914 if tmpl:
914 if tmpl:
915 t.use_template(tmpl)
915 t.use_template(tmpl)
916 return t
916 return t
917
917
918 def finddate(ui, repo, date):
918 def finddate(ui, repo, date):
919 """Find the tipmost changeset that matches the given date spec"""
919 """Find the tipmost changeset that matches the given date spec"""
920
920
921 df = util.matchdate(date)
921 df = util.matchdate(date)
922 m = matchall(repo)
922 m = matchall(repo)
923 results = {}
923 results = {}
924
924
925 def prep(ctx, fns):
925 def prep(ctx, fns):
926 d = ctx.date()
926 d = ctx.date()
927 if df(d[0]):
927 if df(d[0]):
928 results[ctx.rev()] = d
928 results[ctx.rev()] = d
929
929
930 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
930 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
931 rev = ctx.rev()
931 rev = ctx.rev()
932 if rev in results:
932 if rev in results:
933 ui.status(_("Found revision %s from %s\n") %
933 ui.status(_("Found revision %s from %s\n") %
934 (rev, util.datestr(results[rev])))
934 (rev, util.datestr(results[rev])))
935 return str(rev)
935 return str(rev)
936
936
937 raise util.Abort(_("revision matching date not found"))
937 raise util.Abort(_("revision matching date not found"))
938
938
939 def walkchangerevs(repo, match, opts, prepare):
939 def walkchangerevs(repo, match, opts, prepare):
940 '''Iterate over files and the revs in which they changed.
940 '''Iterate over files and the revs in which they changed.
941
941
942 Callers most commonly need to iterate backwards over the history
942 Callers most commonly need to iterate backwards over the history
943 in which they are interested. Doing so has awful (quadratic-looking)
943 in which they are interested. Doing so has awful (quadratic-looking)
944 performance, so we use iterators in a "windowed" way.
944 performance, so we use iterators in a "windowed" way.
945
945
946 We walk a window of revisions in the desired order. Within the
946 We walk a window of revisions in the desired order. Within the
947 window, we first walk forwards to gather data, then in the desired
947 window, we first walk forwards to gather data, then in the desired
948 order (usually backwards) to display it.
948 order (usually backwards) to display it.
949
949
950 This function returns an iterator yielding contexts. Before
950 This function returns an iterator yielding contexts. Before
951 yielding each context, the iterator will first call the prepare
951 yielding each context, the iterator will first call the prepare
952 function on each context in the window in forward order.'''
952 function on each context in the window in forward order.'''
953
953
954 def increasing_windows(start, end, windowsize=8, sizelimit=512):
954 def increasing_windows(start, end, windowsize=8, sizelimit=512):
955 if start < end:
955 if start < end:
956 while start < end:
956 while start < end:
957 yield start, min(windowsize, end - start)
957 yield start, min(windowsize, end - start)
958 start += windowsize
958 start += windowsize
959 if windowsize < sizelimit:
959 if windowsize < sizelimit:
960 windowsize *= 2
960 windowsize *= 2
961 else:
961 else:
962 while start > end:
962 while start > end:
963 yield start, min(windowsize, start - end - 1)
963 yield start, min(windowsize, start - end - 1)
964 start -= windowsize
964 start -= windowsize
965 if windowsize < sizelimit:
965 if windowsize < sizelimit:
966 windowsize *= 2
966 windowsize *= 2
967
967
    follow = opts.get('follow') or opts.get('follow_first')

    if not len(repo):
        return []

    if follow:
        defrange = '%s:0' % repo['.'].rev()
    else:
        defrange = '-1:0'
    revs = revrange(repo, opts['rev'] or [defrange])
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = util.cachefunc(repo.changectx)

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if not slowpath and not match.files():
        # No files, no patterns. Display all revs.
        wanted = set(revs)
    copies = []

    if not slowpath:
        # We only have to read through the filelog to find wanted revisions

        minrev, maxrev = min(revs), max(revs)
        def filerevgen(filelog, last):
            """
            Only files, no patterns. Check the history of each file.

            Examines filelog entries within minrev, maxrev linkrev range
            Returns an iterator yielding (linkrev, parentlinkrevs, copied)
            tuples in backwards order
            """
            cl_count = len(repo)
            revs = []
            for j in xrange(0, last + 1):
                linkrev = filelog.linkrev(j)
                if linkrev < minrev:
                    continue
                # only yield rev for which we have the changelog, it can
                # happen while doing "hg log" during a pull or commit
                if linkrev >= cl_count:
                    break

                parentlinkrevs = []
                for p in filelog.parentrevs(j):
                    if p != nullrev:
                        parentlinkrevs.append(filelog.linkrev(p))
                n = filelog.node(j)
                revs.append((linkrev, parentlinkrevs,
                             follow and filelog.renamed(n)))

            return reversed(revs)
        def iterfiles():
            for filename in match.files():
                yield filename, None
            for filename_node in copies:
                yield filename_node
        for file_, node in iterfiles():
            filelog = repo.file(file_)
            if not len(filelog):
                if node is None:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    if follow:
                        raise util.Abort(
                            _('cannot follow nonexistent file: "%s"') % file_)
                    slowpath = True
                    break
                else:
                    continue

            if node is None:
                last = len(filelog) - 1
            else:
                last = filelog.rev(node)


            # keep track of all ancestors of the file
            ancestors = set([filelog.linkrev(last)])

            # iterate from latest to oldest revision
            for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
                if not follow:
                    if rev > maxrev:
                        continue
                else:
                    # Note that last might not be the first interesting
                    # rev to us:
                    # if the file has been changed after maxrev, we'll
                    # have linkrev(last) > maxrev, and we still need
                    # to explore the file graph
                    if rev not in ancestors:
                        continue
                    # XXX insert 1327 fix here
                    if flparentlinkrevs:
                        ancestors.update(flparentlinkrevs)

                fncache.setdefault(rev, []).append(file_)
                wanted.add(rev)
                if copied:
                    copies.append(copied)
    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        for i in sorted(revs):
            ctx = change(i)
            matches = filter(match, ctx.files())
            if matches:
                fncache[i] = matches
                wanted.add(i)

    class followfilter(object):
        def __init__(self, onlyfirst=False):
            self.startrev = nullrev
            self.roots = set()
            self.onlyfirst = onlyfirst

        def match(self, rev):
            def realparents(rev):
                if self.onlyfirst:
                    return repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(lambda x: x != nullrev,
                                  repo.changelog.parentrevs(rev))

            if self.startrev == nullrev:
                self.startrev = rev
                return True

            if rev > self.startrev:
                # forward: all descendants
                if not self.roots:
                    self.roots.add(self.startrev)
                for parent in realparents(rev):
                    if parent in self.roots:
                        self.roots.add(rev)
                        return True
            else:
                # backwards: all parents
                if not self.roots:
                    self.roots.update(realparents(self.startrev))
                if rev in self.roots:
                    self.roots.remove(rev)
                    self.roots.update(realparents(rev))
                    return True

            return False

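    # Illustration: on a linear history 0-1-2-3, a first call such as
    # ff.match(2) records startrev=2 and answers True.  A later ff.match(3)
    # walks forward (3 > 2): parent 2 is in roots, so 3 is a descendant and
    # matches.  Called with decreasing revisions instead, roots is seeded
    # with the parents of startrev and each hit shifts the root set one
    # generation back, so only ancestors keep answering True.
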
    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo.changelog.rev(repo.lookup(rev))
        ff = followfilter()
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted.discard(x)

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = followfilter(onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        for i, window in increasing_windows(0, len(revs)):
            nrevs = [rev for rev in revs[i:i + window] if want(rev)]
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)
    return iterate()

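# A minimal usage sketch (hypothetical caller mirroring finddate above --
# not part of the original module): collect the revisions that touch a set
# of files.
#
#   def touching(ui, repo, pats, opts):
#       m = match(repo, pats, opts)
#       hits = []
#       def prep(ctx, fns):
#           hits.append(ctx.rev())    # fns iterates the files that matched
#       for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
#           pass                      # contexts arrive newest-first here
#       return hits
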
def add(ui, repo, match, dryrun, listsubrepos, prefix):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, wctx)
    for f in repo.walk(match):
        exact = match.exact(f)
        if exact or f not in repo.dirstate:
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(join(f)))

    if listsubrepos:
        for subpath in wctx.substate:
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, match)
                bad.extend(sub.add(ui, submatch, dryrun, prefix))
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    if not dryrun:
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        addremove(repo, pats, opts)

    return commitfunc(ui, repo, message, match(repo, pats, opts), opts)

def commiteditor(repo, ctx, subs):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs)

def commitforceeditor(repo, ctx, subs):
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(_("HG: Enter commit message."
                      " Lines beginning with 'HG:' are removed."))
    edittext.append(_("HG: Leave message empty to abort commit."))
    edittext.append("HG: --")
    edittext.append(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        edittext.append(_("HG: branch merge"))
    if ctx.branch():
        edittext.append(_("HG: branch '%s'") % ctx.branch())
    edittext.extend([_("HG: subrepo %s") % s for s in subs])
    edittext.extend([_("HG: added %s") % f for f in added])
    edittext.extend([_("HG: changed %s") % f for f in modified])
    edittext.extend([_("HG: removed %s") % f for f in removed])
    if not added and not modified and not removed:
        edittext.append(_("HG: no files changed"))
    edittext.append("")
    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    text = repo.ui.edit("\n".join(edittext), ctx.user())
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)
    os.chdir(olddir)

    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
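
A quick sanity check of the 'HG:' stripping regex used by commitforceeditor
(an illustrative snippet, not part of the changeset):

import re

text = "fix the frobnicator\n\nHG: Enter commit message.\nHG: user: alice\n"
print repr(re.sub("(?m)^HG:.*(\n|$)", "", text))
# prints 'fix the frobnicator\n\n' -- every line starting with 'HG:' is
# dropped, trailing newline included, leaving only the user's message.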
@@ -1,1972 +1,1976 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo, discovery, pushkey
import changelog, dirstate, filelog, manifest, context, bookmarks
import lock, transaction, store, encoding
import scmutil, util, extensions, hook, error
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
-    supportedformats = set(('revlogv1',))
+    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
+                if self.ui.configbool('format', 'generaldelta', False):
+                    requirements.append("generaldelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener.read("requires").splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = {}
+        if 'generaldelta' in requirements:
+            self.sopener.options['generaldelta'] = 1

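    # Note on the lines added above: 'generaldelta' lands in .hg/requires
    # only for repositories created with it enabled, so clients that predate
    # the format refuse to open them instead of corrupting them.  Assuming
    # the config knob read in __init__, a new repository opts in via:
    #
    #   [format]
    #   generaldelta = True
    #
    # and _applyrequirements then hands the flag to the revlog layer through
    # self.sopener.options.
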
    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    @util.propertycache
    def _bookmarks(self):
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.options['defversion'] = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

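        # Sketch of the resulting file layout: writetags appends one
        # "<40-hex node> <name>" line per tag, and when a tag already exists
        # its previous node is written first so the tag's history survives a
        # move, e.g. (abbreviated hashes, illustrative only):
        #
        #   0f6a...b31 v1.0
        #   8c3d...a02 v1.0
        #
        # The same layout is used for both .hgtags and .hg/localtags.
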
        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self._tags is None:
            (self._tags, self._tagtypes) = self._findtags()

        return self._tags

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            r = self.changelog.rev(n)
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
            for tags in self.nodetagscache.itervalues():
                tags.sort()
        return self.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            tip = heads[-1]
            for h in reversed(heads):
                if 'close' not in self.changelog.read(h)[5]:
                    tip = h
                    break
            bt[bn] = tip
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass

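    # Sketch of the cache format handled by the two methods above: one
    # "<tip hex> <tip rev>" header line, then one "<node hex> <branch>" line
    # per head (abbreviated hashes, illustrative only):
    #
    #   a9c3...81e1 1042
    #   a9c3...81e1 default
    #   77fd...c209 stable
    #
    # _readbranchcache throws the whole file away whenever the recorded tip
    # no longer matches the repository.
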
    def _updatebranchcache(self, partial, ctxgen):
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        return [(n in nm) for n in nodes]

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

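    # Illustration, assuming the standard hgrc filter syntax: _loadfilter
    # reads pattern/command pairs from the [encode] and [decode] sections,
    # e.g.
    #
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
    #
    # A command of '!' disables a pattern, and a command starting with a
    # name registered via adddatafilter (below) runs in-process instead of
    # through the shell.
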
628 def _filter(self, filterpats, filename, data):
632 def _filter(self, filterpats, filename, data):
629 for mf, fn, cmd in filterpats:
633 for mf, fn, cmd in filterpats:
630 if mf(filename):
634 if mf(filename):
631 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
635 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
632 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
636 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
633 break
637 break
634
638
635 return data
639 return data
636
640
637 @propertycache
641 @propertycache
638 def _encodefilterpats(self):
642 def _encodefilterpats(self):
639 return self._loadfilter('encode')
643 return self._loadfilter('encode')
640
644
641 @propertycache
645 @propertycache
642 def _decodefilterpats(self):
646 def _decodefilterpats(self):
643 return self._loadfilter('decode')
647 return self._loadfilter('decode')
644
648
645 def adddatafilter(self, name, filter):
649 def adddatafilter(self, name, filter):
646 self._datafilters[name] = filter
650 self._datafilters[name] = filter
647
651
648 def wread(self, filename):
652 def wread(self, filename):
649 if self._link(filename):
653 if self._link(filename):
650 data = os.readlink(self.wjoin(filename))
654 data = os.readlink(self.wjoin(filename))
651 else:
655 else:
652 data = self.wopener.read(filename)
656 data = self.wopener.read(filename)
653 return self._filter(self._encodefilterpats, filename, data)
657 return self._filter(self._encodefilterpats, filename, data)
654
658
655 def wwrite(self, filename, data, flags):
659 def wwrite(self, filename, data, flags):
656 data = self._filter(self._decodefilterpats, filename, data)
660 data = self._filter(self._decodefilterpats, filename, data)
657 if 'l' in flags:
661 if 'l' in flags:
658 self.wopener.symlink(data, filename)
662 self.wopener.symlink(data, filename)
659 else:
663 else:
660 self.wopener.write(filename, data)
664 self.wopener.write(filename, data)
661 if 'x' in flags:
665 if 'x' in flags:
662 util.setflags(self.wjoin(filename), False, True)
666 util.setflags(self.wjoin(filename), False, True)
663
667
664 def wwritedata(self, filename, data):
668 def wwritedata(self, filename, data):
665 return self._filter(self._decodefilterpats, filename, data)
669 return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
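
    # Editorial note, not upstream code: the journal.* files written by
    # _writejournal() snapshot the pre-transaction state, and
    # aftertrans(renames) renames each one to its undo.* counterpart
    # (via undoname) once the transaction completes, which is what
    # recover() and rollback() below rely on.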

    def _writejournal(self, desc):
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                try:
                    args = self.opener.read("undo.desc").splitlines()
                    if len(args) >= 3 and self.ui.verbose:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s: %s)\n") % (
                                 int(args[0]) - 1, args[1], args[2])
                    elif len(args) >= 2:
                        desc = _("repository tip rolled back to revision %s"
                                 " (undo %s)\n") % (
                                 int(args[0]) - 1, args[1])
                except IOError:
                    desc = _("rolling back unknown transaction\n")
                self.ui.status(desc)
                if dryrun:
                    return
                transaction.rollback(self.sopener, self.sjoin("undo"),
                                     self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                if os.path.exists(self.join('undo.bookmarks')):
                    util.rename(self.join('undo.bookmarks'),
                                self.join('bookmarks'))
                try:
                    branch = self.opener.read("undo.branch")
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("named branch could not be reset, "
                                   "current branch is still: %s\n")
                                 % self.dirstate.branch())
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
                parents = tuple([p.rev() for p in self.parents()])
                if len(parents) > 1:
                    self.ui.status(_("working directory now based on "
                                     "revisions %d and %d\n") % parents)
                else:
                    self.ui.status(_("working directory now based on "
                                     "revision %d\n") % parents)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def invalidatecaches(self):
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                delattr(self, a)
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
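
    # Editorial note, not upstream code: a hedged sketch of the lock
    # ordering the methods below follow -- wlock() before lock(), and
    # lock() before opening a transaction:
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     tr = repo.transaction("sketch")  # "sketch" is a made-up desc
    #     try:
    #         ...
    #         tr.close()
    #     finally:
    #         tr.release()
    #         lock.release()
    #         wlock.release()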

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
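
    # Editorial note, not upstream code: _filecommit() returns the newly
    # added filenode when content, copy metadata, or a second parent
    # forced a new file revision, and plain fparent1 when at most the
    # flags changed, so commitctx() can store the result directly in the
    # new manifest.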

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            for p in wctx.parents():
                removedsubs.update(s for s in p.substate if match(s))
            for s in wctx.substate:
                removedsubs.discard(s)
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if (subs or removedsubs):
                if (not match('.hgsub') and
                    '.hgsub' in (wctx.modified() + wctx.added())):
                    raise util.Abort(_("can't commit subrepos without .hgsub"))
                if '.hgsubstate' not in changes[0]:
                    changes[0].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfile = self.opener('last-message.txt', 'wb')
            msgfile.write(cctx._text)
            msgfile.close()

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    msgfn = self.pathto(msgfile.name[len(self.root)+1:])
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret
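
    # Editorial note, not upstream code: the hooks above fire in the
    # order precommit (can veto before anything is written), pretxncommit
    # (inside commitctx's transaction, can still abort it), then commit
    # (after the dirstate has been moved to the new revision).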

    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx1.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
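
    # Editorial note, not upstream code: the status tuple is always
    # ordered (modified, added, removed, deleted, unknown, ignored,
    # clean), each element a sorted list of filenames:
    #
    #     modified, added, removed, deleted, unknown, ignored, clean = \
    #         repo.status(unknown=True, clean=True)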

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
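
    # Editorial note, not upstream code: branches() serves the old wire
    # protocol's "branches" query -- for each node it walks first parents
    # until it hits a merge or a root, and reports (start, end-of-chain,
    # parent1, parent2) for that linear run.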

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
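
    # Editorial note, not upstream code: for each (top, bottom) pair the
    # loop above records the first-parent ancestors at distances 1, 2, 4,
    # 8, ... from top, so a linear run of depth d is summarized by about
    # log2(d) nodes -- the sample points the legacy discovery protocol
    # bisects between.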

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result
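
    # Editorial note, not upstream code: the capability checks above
    # prefer the newer getbundle command, then changegroupsubset, and
    # finally plain changegroup, which cannot limit the pull to a heads
    # subset -- hence the abort when a partial pull is requested from a
    # remote without changegroupsubset.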

    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        pass

    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            for n in missing:
                if revlog.linkrev(revlog.rev(n)) not in commonrevs:
                    yield n

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})
                first = True

                for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
                                              bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
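    # Illustration (not part of the original source): the per-file loop in
    # gengroup() peeks at the first chunk to suppress empty groups -- if the
    # very first chunk equals the group terminator, nothing was packed and
    # no file header is emitted.  The same pattern in miniature, with a
    # hypothetical 'CLOSE' terminator:
    #
    #   def emit_groups(groups, terminator='CLOSE'):
    #       for name, chunks in groups:
    #           first = True
    #           for chunk in chunks:
    #               if first:
    #                   if chunk == terminator:
    #                       break        # empty group: skip the header
    #                   yield 'HEADER:' + name
    #                   first = False
    #               yield chunk
    #
    #   list(emit_groups([('a', ['x', 'CLOSE']), ('b', ['CLOSE'])]))
    #   # -> ['HEADER:a', 'x', 'CLOSE']; file 'b' contributes nothing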

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                first = True
                for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
                    if first:
                        if chunk == bundler.close():
                            break
                        count[0] += 1
                        yield bundler.fileheader(fname)
                        first = False
                    yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
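    # Illustration (not part of the original source): gennodelst() above
    # selects exactly those manifest/file revisions whose linkrev points
    # into the outgoing changeset set.  The same filter over plain
    # (rev, linkrev) pairs:
    #
    #   def select_outgoing(entries, outgoing):
    #       return [rev for rev, linkrev in entries if linkrev in outgoing]
    #
    #   select_outgoing([(0, 0), (1, 2), (2, 5)], {2, 5})  # -> [1, 2]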

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles)
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = 'files'
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while 1:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
                    needs = needfiles[f]
                    for new in xrange(o, len(fl)):
                        n = fl.node(new)
                        if n in needs:
                            needs.remove(n)
                    if not needs:
                        del needfiles[f]
            self.ui.progress(_('files'), None)

            for f, needs in needfiles.iteritems():
                fl = self.file(f)
                for n in needs:
                    try:
                        fl.rev(n)
                    except error.LookupError:
                        raise util.Abort(
                            _('missing file data for %s:%s - run hg verify') %
                            (f, hex(n)))

            dh = 0
            if oldheads:
                heads = cl.heads()
                dh = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and 'close' in self[h].extra():
                        dh -= 1
            htext = ""
            if dh:
                htext = _(" (%+d heads)") % dh

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            tr.release()
            if lock:
                lock.release()

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug("updating the branch cache\n")
            self.updatebranchcache()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if dh < 0:
            return dh - 1
        else:
            return dh + 1
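    # Worked example of the return convention documented above (an
    # illustration, not part of the original source).  Ignoring the
    # closed-head adjustment: 1 head before and 3 after gives dh = +2 and a
    # return of dh + 1 == 3; 3 before and 1 after gives dh = -2 and
    # dh - 1 == -3; no change gives dh = 0 and a return of 1, so 0 stays
    # reserved for "nothing happened".
    #
    #   def summarize(oldheads, newheads):
    #       dh = newheads - oldheads
    #       return dh - 1 if dh < 0 else dh + 1
    #
    #   summarize(1, 3) == 3; summarize(3, 1) == -3; summarize(2, 2) == 1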

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
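    # Illustration (not part of the original source): the stream read by
    # stream_in() is framed as one status line, one "total_files
    # total_bytes" line, then per file a "name\0size" line followed by
    # exactly `size` raw bytes.  A minimal stand-alone parser under that
    # assumption:
    #
    #   def parse_stream(fp):
    #       assert int(fp.readline()) == 0          # 0 means OK
    #       nfiles, nbytes = map(int, fp.readline().split(' ', 1))
    #       for _i in xrange(nfiles):
    #           name, size = fp.readline().split('\0', 1)
    #           yield name, fp.read(int(size))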

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.localpath(path), create)

def islocal(path):
    return True
@@ -1,1249 +1,1259 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev, short #@UnusedImport
from i18n import _
import ancestor, mdiff, parsers, error, util
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)
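# Illustration (not part of the original source): the index stores a
# revision's data offset and its 16-bit flags field in one integer, offset
# in the high bits, so getoffset()/gettype() exactly invert offset_type():
#
#   q = offset_type(1024, 3)     # (1024 << 16) | 3
#   getoffset(q) == 1024 and gettype(q) == 3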

nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent node is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()
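# Equivalent formulation (an illustration, not part of the original
# source): the node hash is SHA-1 over the two parent ids in sorted order
# followed by the text.  The nullhash fast path above computes the same
# thing, since nullid is twenty zero bytes and always sorts first:
#
#   import hashlib
#   def node_hash(text, p1, p2):
#       a, b = sorted([p1, p2])
#       return hashlib.sha1(a + b + text).digest()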

def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so lets do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)
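# Round-trip property of the pair above (an illustration, not part of the
# original source): compress() returns a (header, data) pair whose
# concatenation decompress() accepts, keyed on the first byte -- 'x' for
# zlib output, 'u' for stored-uncompressed, '\0' or empty for data that
# can be returned as-is:
#
#   h, d = compress(sometext)
#   assert decompress(h + d) == sometext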

indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p
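# Size check for the RevlogNG layout documented above (an illustration,
# not part of the original source): the fields add up to
# 8 + 6*4 + 20 + 12 = 64 bytes per index entry, and the first entry
# overlays the version word on its first four bytes:
#
#   struct.calcsize(">Qiiiiii20s12x")  # -> 64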

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = None
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
-        if hasattr(opener, 'options') and 'defversion' in opener.options:
-            v = opener.options['defversion']
-            if v & REVLOGNG:
-                v |= REVLOGNGINLINEDATA
+        if hasattr(opener, 'options'):
+            if 'defversion' in opener.options:
+                v = opener.options['defversion']
+                if v & REVLOGNG:
+                    v |= REVLOGNGINLINEDATA
+            if v & REVLOGNG and 'generaldelta' in opener.options:
+                v |= REVLOGGENERALDELTA

        i = ''
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def rev(self, node):
        try:
            return self._nodecache[node]
        except KeyError:
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))
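    # Illustration (not part of the original source): the miss path above
    # fills the node->rev cache lazily, scanning the index backwards from
    # the last unscanned position so repeated misses never revisit rows
    # already cached.  The same idea over a plain list of nodes:
    #
    #   def find(nodes, cache, state, wanted):
    #       if wanted in cache:
    #           return cache[wanted]
    #       for r in xrange(state[0], -1, -1):
    #           cache[nodes[r]] = r
    #           if nodes[r] == wanted:
    #               state[0] = r - 1
    #               return r
    #       raise KeyError(wanted)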

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
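    # Illustration (not part of the original source): chainbase() follows
    # the base-rev column until it reaches a revision that is its own base,
    # i.e. the start of the delta chain.  With a base column of
    # [0, 0, 1, 2], chainbase(3) walks 3 -> 2 -> 1 -> 0 and returns 0.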
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in xrange(first + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]
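    # Illustration (not part of the original source): for a linear graph
    # 0 <- 1 <- 2 with common=[1] and heads=[2], 'has' is the inclusive
    # ancestor set {nullrev, 0, 1} and the missing list is [2] -- an
    # ancestor of a head that is not an ancestor of common.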
443
446
444 def findmissing(self, common=None, heads=None):
447 def findmissing(self, common=None, heads=None):
445 """Return the ancestors of heads that are not ancestors of common.
448 """Return the ancestors of heads that are not ancestors of common.
446
449
447 More specifically, return a list of nodes N such that every N
450 More specifically, return a list of nodes N such that every N
448 satisfies the following constraints:
451 satisfies the following constraints:
449
452
450 1. N is an ancestor of some node in 'heads'
453 1. N is an ancestor of some node in 'heads'
451 2. N is not an ancestor of any node in 'common'
454 2. N is not an ancestor of any node in 'common'
452
455
453 The list is sorted by revision number, meaning it is
456 The list is sorted by revision number, meaning it is
454 topologically sorted.
457 topologically sorted.
455
458
456 'heads' and 'common' are both lists of node IDs. If heads is
459 'heads' and 'common' are both lists of node IDs. If heads is
457 not supplied, uses all of the revlog's heads. If common is not
460 not supplied, uses all of the revlog's heads. If common is not
458 supplied, uses nullid."""
461 supplied, uses nullid."""
459 _common, missing = self.findcommonmissing(common, heads)
462 _common, missing = self.findcommonmissing(common, heads)
460 return missing
463 return missing
461
464
    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses the list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendent of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendent of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendents = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendents.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendents, empty parents will look like
        # they're descendents.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendent = False
            if lowestrev == nullrev: # Everybody is a descendent of nullid
                isdescendent = True
            elif n in descendents:
                # n is already a descendent
                isdescendent = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendents before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendents, it's not a root.
                    if (p[0] in descendents) or (p[1] in descendents):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendent if either of its parents are
                # descendents. (We seeded the descendents list with the roots
                # up there, remember?)
                if (p[0] in descendents) or (p[1] in descendents):
                    descendents.add(n)
                    isdescendent = True
            if isdescendent and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendents and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

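    # Illustrative usage (added sketch; names are hypothetical): for a
    # linear history with nodes n0 <- n1 <- n2,
    #
    #     rl.nodesbetween([n0], [n2])
    #
    # returns ([n0, n1, n2], [n0], [n2]): the topological path plus the
    # subsets of 'roots' and 'heads' that actually lie on it.
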
    def headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0
        return [r for r in xrange(count) if ishead[r]]

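    # Added commentary: e[5] and e[6] are the parent revisions of entry r
    # (see the index tuple built in _addrevision below), so every revision
    # that occurs as a parent is cleared from 'ishead' and the survivors are
    # exactly the childless revisions. The list has count + 1 slots so that
    # nullrev (-1) parents harmlessly clear the spare last slot.
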
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullid
        if stop is None:
            stop = []
        stoprevs = set([self.rev(n) for n in stop])
        startrev = self.rev(start)
        reachable = set((startrev,))
        heads = set((startrev,))

        parentrevs = self.parentrevs
        for r in xrange(startrev + 1, len(self)):
            for p in parentrevs(r):
                if p in reachable:
                    if r not in stoprevs:
                        reachable.add(r)
                    heads.add(r)
                if p in heads and p not in stoprevs:
                    heads.remove(p)

        return [self.node(r) for r in heads]

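    # Illustrative usage (added sketch; names are hypothetical): with a
    # history n0 <- n1 <- n2 and a second branch n1 <- n3,
    #
    #     rl.heads()          # -> [n2, n3], the childless nodes
    #     rl.heads(start=n2)  # -> [n2], heads descending from n2
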
    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in range(p + 1, len(self)):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def descendant(self, start, end):
        if start == nullrev:
            return True
        for i in self.descendants(start):
            if i == end:
                return True
            elif i > end:
                break
        return False

    def ancestor(self, a, b):
        """calculate the least common ancestor of nodes a and b"""

        # fast path, check if it is a descendant
        a, b = self.rev(a), self.rev(b)
        start, end = sorted((a, b))
        if self.descendant(start, end):
            return self.node(start)

        def parents(rev):
            return [p for p in self.parentrevs(rev) if p != nullrev]

        c = ancestor.ancestor(a, b, parents)
        if c is None:
            return nullid

        return self.node(c)

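    # Added commentary: the fast path relies on revision numbers being
    # topologically ordered: if the lower-numbered revision is an ancestor
    # of the higher one, it is itself the least common ancestor, so e.g.
    # ancestor(a, b) returns a immediately whenever b descends from a.
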
    def _match(self, id):
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass

    def _partialmatch(self, id):
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                pass

    def lookup(self, id):
        """locate a node based on:
            - revision number or str(revision number)
            - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise LookupError(id, self.indexfile, _('no match found'))

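    # Illustrative usage (added sketch; 'rl' and the prefix are
    # hypothetical): assuming '1e4f' is an unambiguous hex prefix of
    # revision 3's node, all of these resolve to the same 20-byte node:
    #
    #     rl.lookup(3)        # revision number
    #     rl.lookup('3')      # str(revision number)
    #     rl.lookup('1e4f')   # unambiguous hex prefix
    #
    # An ambiguous prefix raises LookupError ('ambiguous identifier').
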
    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return hash(text, p1, p2) != node

    def _addchunk(self, offset, data):
        o, d = self._chunkcache
        # try to add to existing cache
        if o + len(d) == offset and len(d) + len(data) < _chunksize:
            self._chunkcache = o, d + data
        else:
            self._chunkcache = offset, data

    def _loadchunk(self, offset, length):
        if self._inline:
            df = self.opener(self.indexfile)
        else:
            df = self.opener(self.datafile)

        readahead = max(65536, length)
        df.seek(offset)
        d = df.read(readahead)
        self._addchunk(offset, d)
        if readahead > length:
            return d[:length]
        return d

    def _getchunk(self, offset, length):
        o, d = self._chunkcache
        l = len(d)

        # is it in the cache?
        cachestart = offset - o
        cacheend = cachestart + length
        if cachestart >= 0 and cacheend <= l:
            if cachestart == 0 and cacheend == l:
                return d # avoid a copy
            return d[cachestart:cacheend]

        return self._loadchunk(offset, length)

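    # Added commentary: the chunk cache is a single (offset, data) window;
    # a read is served from it only when it falls entirely inside that
    # window. For example, with _chunkcache == (100, d) and len(d) == 65536,
    # a request for offset=150, length=20 returns d[50:70] without touching
    # the file.
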
    def _chunkraw(self, startrev, endrev):
        start = self.start(startrev)
        length = self.end(endrev) - start
        if self._inline:
            start += (startrev + 1) * self._io.size
        return self._getchunk(start, length)

    def _chunk(self, rev):
        return decompress(self._chunkraw(rev, rev))

    def _chunkbase(self, rev):
        return self._chunk(rev)

    def _chunkclear(self):
        self._chunkcache = (0, '')

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1

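    # Added commentary: index[rev][3] stores the delta base. In generaldelta
    # revlogs that base is the actual delta parent chosen at write time; in
    # classic revlogs deltas always chain against rev - 1. A base equal to
    # rev marks a full snapshot, reported as nullrev.
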
    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return self._chunk(rev2)

        return mdiff.textdiff(self.revision(self.node(rev1)),
                              self.revision(self.node(rev2)))

    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text

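    # Added commentary: reconstruction walks the index back from 'rev' to
    # its chain base (following e[3] under generaldelta, rev - 1 otherwise)
    # and replays the collected deltas in a single mdiff.patches() call.
    # For example, if rev 7 deltas against 6, and 6 against a full text at
    # 5, the reversed chain is [6, 7]: the base text of rev 5 is patched
    # with both deltas, oldest first.
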
    def _checkhash(self, text, node, rev):
        p1, p2 = self.parents(node)
        if node != hash(text, p1, p2):
            raise RevlogError(_("integrity check failed on %s:%d")
                              % (self.indexfile, rev))
        return text

    def checkinlinesize(self, tr, fp=None):
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        self._chunkclear()

    def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        """
        node = hash(text, p1, p2)
        if node in self.nodemap:
            return node

        dfh = None
        if not self._inline:
            dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a+")
        try:
            return self._addrevision(node, text, transaction, link, p1, p2,
                                     cachedelta, ifh, dfh)
        finally:
            if dfh:
                dfh.close()
            ifh.close()

    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):

        btext = [text]
        def buildtext():
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            basecache = self._basecache
            if basecache and basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base

        curr = len(self)
        prev = curr - 1
        base = curr
        offset = self.end(prev)
        flags = 0
        d = None
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                d = builddelta(p1r)
            else:
                d = builddelta(prev)
            dist, l, data, base = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, base)
        return node

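    # Added commentary: the 'dist > textlen * 2' test bounds delta chains by
    # read cost rather than chain length. 'dist' is the span of bytes from
    # the chain base to the end of the new delta, so once reconstructing a
    # revision would read more than twice its uncompressed size, a fresh
    # full version (base = curr) is stored instead.
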
    def group(self, nodelist, bundler):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. If the first parent is nullrev the
        changegroup starts with a full revision.
        """

        revs = sorted([self.rev(n) for n in nodelist])

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield bundler.close()
            return

        # add the parent of the first rev
        p = self.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        for r in xrange(len(revs) - 1):
            prev, curr = revs[r], revs[r + 1]
            for c in bundler.revchunk(self, curr, prev):
                yield c

        yield bundler.close()

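    # Added commentary: with the first parent inserted at revs[0], each
    # bundler.revchunk(self, curr, prev) call emits 'curr' as a delta
    # against the rev transmitted just before it, matching the addgroup
    # contract below: the first delta is against a parent the receiver
    # already has, the rest against the previous delta.
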
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while 1:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                for p in (p1, p2):
                    if p not in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        del self.index[rev:-1]

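    # Illustrative arithmetic (added): for a non-inline revlog the data file
    # is truncated at self.start(rev) and the index at rev * self._io.size,
    # since index entries have a fixed size. For an inline revlog both live
    # in one file, so the cut point is self.start(rev) plus the rev
    # interleaved index entries, i.e. rev * self._io.size more bytes.
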
    def checksize(self):
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)

    def files(self):
        res = [self.indexfile]
        if not self._inline:
            res.append(self.datafile)
        return res