##// END OF EJS Templates
manifest: delete unused dirlog and _newmanifest functions...
Durham Goode -
r30371:fccc3eea default
parent child Browse files
Show More
@@ -1,3577 +1,3577
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import sys
13 import sys
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 changelog,
27 changelog,
28 copies,
28 copies,
29 crecord as crecordmod,
29 crecord as crecordmod,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 graphmod,
33 graphmod,
34 lock as lockmod,
34 lock as lockmod,
35 match as matchmod,
35 match as matchmod,
36 obsolete,
36 obsolete,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 repair,
40 repair,
41 revlog,
41 revlog,
42 revset,
42 revset,
43 scmutil,
43 scmutil,
44 templatekw,
44 templatekw,
45 templater,
45 templater,
46 util,
46 util,
47 )
47 )
48 stringio = util.stringio
48 stringio = util.stringio
49
49
def ishunk(x):
    """Return True if x is a record or crecord hunk instance."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
53
53
def newandmodified(chunks, originalchunks):
    """Return the set of filenames of hunks that create a new file and
    were not part of originalchunks (i.e. were edited while recording).
    """
    return set(c.header.filename() for c in chunks
               if ishunk(c)
               and c.header.isnewfile()
               and c not in originalchunks)
61
61
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    Leading '^' markers (flagging "important" commands) are discarded,
    and the remainder is split on '|'.
    """
    stripped = cmd.lstrip("^")
    return stripped.split("|")
64
64
def setupwrapcolorwrite(ui):
    """Replace ui.write with a diff-labeling wrapper.

    Returns the original ui.write so the caller can restore it when
    done (see recordfilter).
    """
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        baselabel = kwargs.pop('label', '')
        # run written chunks through difflabel so diff output is
        # labeled/colorized like regular 'hg diff' output
        for chunk, sublabel in patch.difflabel(lambda: args):
            oldwrite(chunk, label=baselabel + sublabel)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
77
77
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk filtering to the curses UI or the plain-text prompt."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # test mode: drive the curses chunk selector from a script file
        chunkselector = crecordmod.testdecorator(testfile,
                                                 crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chunkselector)
90
90
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, opts).

    *operation* is used to build ui messages indicating to the user
    what kind of filtering they are doing: reverting, committing,
    shelving, etc. (see patch.filterpatch).
    """
    curses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    # wrap ui.write for the duration so diff output is colorized
    restorewrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, curses, testfile, operation)
    finally:
        ui.write = restorewrite
107
107
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via *commitfunc*.

    'cmdsuggest' names a non-interactive command to suggest when the ui
    is not interactive.  'backupall' forces backing up every changed
    file instead of only the partially-selected ones.  'filterfn' is
    the hunk-filtering function (e.g. recordfilter).
    """
    from . import merge as mergemod
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            # collect explicitly-matched directories and abort on bad files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # generate a git-style diff with no dates so hunks are stable
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # header-less chunks have no files(); skip them
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # an existing backup directory is fine; anything else is not
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # collect the selected hunks into an in-memory patch
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: leftover backups are not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # hold the working-directory lock for the whole interactive run
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
288
288
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            # prefix matching: the first alias starting with cmd wins
            for alias in aliases:
                if alias.startswith(cmd):
                    matched = alias
                    break
        if matched is None:
            continue
        value = (aliases, table[entry])
        if aliases[0].startswith("debug") or matched.startswith("debug"):
            debugchoice[matched] = value
        else:
            choice[matched] = value

    # surface debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
326
326
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string.

    Raises error.AmbiguousCommand when several commands share the
    prefix, and error.UnknownCommand when nothing matches.
    """
    choice, allcmds = findpossible(cmd, table, strict)

    # an exact match wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several commands share this prefix: report them all, sorted
        # (NOTE: relies on Python 2 dict.keys() returning a list)
        clist = choice.keys()
        clist.sort()
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        # exactly one prefix match
        # (NOTE: Python 2 idiom -- dict.values() indexed directly)
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
343
343
def findrepo(p):
    """Walk up from directory *p* looking for a '.hg' directory.

    Return the repository root containing it, or None when no ancestor
    holds one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root without finding a repository
            return None
        p = parent

    return p
351
351
def bailifchanged(repo, merge=True):
    """Abort when the working directory has uncommitted changes.

    With merge=True (the default), an uncommitted merge also aborts.
    Recurses into subrepos.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # modified, added, removed, deleted -- any nonempty list means dirty
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'))
    wctx = repo[None]
    for sub in sorted(wctx.substate):
        wctx.sub(sub).bailifchanged()
361
361
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        # read the message from the given file; '-' means stdin
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, inst.strerror))
    return message
380
380
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # a ctx with two parents is a merge commit
        ismerge = 1 < len(ctxorbool.parents())
    if ismerge:
        return baseformname + ".merge"
    return baseformname + ".normal"
397
397
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing but
    before checking empty-ness; it returns the actual text to be stored
    into history.  This allows changing the description before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; the 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names to distinguish the
    purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of 'edit'
    when 'finishdesc' or 'extramsg' is given, because those are
    specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
428
428
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # option absent or empty: no limit
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
442
442
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the '%'-escapes in output filename pattern *pat*.

    Supported escapes (each expander is a lazy lambda, evaluated only
    when its character actually appears in pat):
      %% literal '%'            %b basename of the repo root
      %H full node hex          %h short node hex
      %R changelog revision     %r revision zero-padded to revwidth
      %m desc with non-word characters replaced by '_'
      %N total patch count      %n seqno (zero-padded when total given)
      %s/%d/%p basename/dirname/full pathname

    Raises error.Abort for a format character with no registered
    expander (e.g. %H without a node).
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        # NOTE: the original had two consecutive 'if node:' blocks;
        # they are merged here -- all node-based expanders are
        # registered together.
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub(r'[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(
                    revwidth or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # consume the escape character and expand it
                i += 1
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
488
488
class _unclosablefile(object):
    """File-object proxy whose close() is a no-op.

    Used to hand out ui.fout/ui.fin (see makefileobj) to code that
    insists on closing its output stream; all other operations are
    delegated to the wrapped file object.
    """
    def __init__(self, fp):
        # the real file object all operations are forwarded to
        self._fp = fp

    def close(self):
        # intentionally a no-op: the wrapped stream outlives this proxy
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # forward everything not defined here (read, write, flush, ...)
        return getattr(self._fp, attr)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # suppress nothing; close() is already a no-op
        pass
507
507
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open (or pass through) the output target described by *pat*.

    An empty pattern or '-' means the ui's stdout/stdin (wrapped so it
    cannot be closed).  A file-like object passed as *pat* is returned
    as-is.  Otherwise the pattern is expanded via makefilename and
    opened with *mode*, possibly remapped through *modemap* (which
    flips 'wb' to 'ab' after first use so later writes append).
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        # use the ui's standard streams, but refuse to let callers
        # close them
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)
    # already an open file-like object?
    if writable and util.safehasattr(pat, 'write'):
        return pat
    if 'r' in mode and util.safehasattr(pat, 'read'):
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
530
530
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    # selector flags are mutually exclusive; validate the combination
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            # unfiltered so hidden revisions are visible to debug commands
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            # NOTE(review): still uses the legacy repo.manifest API here,
            # while the --dir branch above uses manifestlog._revlog --
            # confirm this is intentional
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        # fall back to opening the revlog's index file straight off disk
        # (file_ is expected to end in '.i' or '.d'; strip the suffix)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
575
575
def copy(ui, repo, pats, opts, rename=False):
    """Record file copies (or renames, when ``rename`` is True).

    ``pats`` is a list of source patterns followed by the destination.
    Returns True if any individual copy failed, False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern to [(abs, rel, exact), ...],
        # warning about (and skipping) unmanaged/removed files
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True to report a failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                # case-only rename on a case-insensitive filesystem
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temp name so a case-only rename
                    # actually changes the on-disk name
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many stripped sources already exist at dest
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
821
821
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    When --daemon is given (and this is the parent invocation), re-exec
    ourselves detached with --daemon-postexec and wait for the child to
    signal startup by removing a lock file; otherwise run in place,
    optionally executing the postexec instructions (detach from the
    terminal and redirect stdio) before calling ``runfn``.
    '''

    def writepid(pid):
        # record the service pid if --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows)
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # detach stdio: stdin from /dev/null, stdout/stderr to the log
        # file (or /dev/null when no log file was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
908
908
## facility to let extension process additional data into an import patch
# list of identifier to be executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
929
929
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found nothing to apply
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    # --partial: keep going, remember there were rejects
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # allow an empty commit so rejects can be committed
                        # separately
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: commit via an in-memory context, leaving the
            # working directory untouched
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1099
1099
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to the actual export header function; each
# function is given two arguments (sequencenumber, changectx) and returns
# a string to be added to the patch header, or None to add nothing
extraexportmap = {}
1107
1107
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    repo -- repository the changesets are read from
    revs -- revisions to export; an empty sequence produces no output
    template -- filename template for per-changeset output files; when
                empty (and fp is None) patches are written to the ui
    fp -- optional file object all patches are written to
    switch_parent -- diff against the second parent instead of the first
    opts -- diff options forwarded to patch.diffui
    match -- matcher restricting which files appear in the diffs
    '''

    total = len(revs)
    # width used when expanding the rev number in the filename template;
    # guard the empty case so exporting zero revisions is a no-op instead
    # of max() raising ValueError on an empty sequence
    revwidth = max([len(str(rev)) for rev in revs] or [0])
    # remembers the mode each output file was first opened with
    filemode = {}

    def single(rev, seqno, fp):
        # write one changeset as a patch: header lines, description, diff
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions contribute extra header lines, in registration order
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    # seqno is 1-based, as expected by the filename template
    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1172
1172
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    # output sink: the given file object when provided, the ui otherwise
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    relroot = pathutil.canonpath(repo.root, repo.getcwd(), root) if root else ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat does not need any context lines
        diffopts = diffopts.copy(context=0)
        width = ui.termwidth() if not ui.plain() else 80
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        statui = patch.diffstatui(util.iterlines(chunks),
                                  width=width,
                                  git=diffopts.git)
        for chunk, label in statui:
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1, ctx2 = repo[node1], repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1230
1230
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        # when buffered, per-revision output is stashed in self.header /
        # self.hunk and later emitted by flush() (e.g. for graph log)
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> rendered text waiting to be flushed
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''write buffered output for ctx; return 1 if a hunk was written'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # avoid repeating an identical header for consecutive entries
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''render ctx, buffered (see flush) or straight to the ui'''
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        # full node hashes with --debug, short ones otherwise
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        # manifest line only makes sense for a committed revision
        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified, added, removed relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        '''write the diffstat and/or diff of ctx when requested via
        --stat / --patch (stored in self.diffopts)'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                # blank line between diffstat and diff when both are shown
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1400
1400
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the opening "[" of the JSON array has been written
        self._first = True

    def close(self):
        # terminate the JSON array; emit an empty one if nothing was shown
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working-directory context has no rev/node; emit JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        # open the array on the first entry, otherwise separate with ","
        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n "rev": %s') % jrev)
            self.ui.write((',\n "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n "rev": %s') % jrev)
        self.ui.write((',\n "node": %s') % jnode)
        self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write((',\n "manifest": %s') % jmanifestnode)

            self.ui.write((',\n "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status relative to the first parent: (modified, added, removed)
            files = ctx.p1().status(ctx)
            self.ui.write((',\n "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            # capture the diffstat/diff output in a buffer so it can be
            # embedded as a single JSON string value
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1499
1499
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # shorten node hashes to 12 characters unless --debug is in effect
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        # fallback templates used when a style/template does not define them
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        # callers pass either an inline template or a map file, never both
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # resolved template name per part; later (more specific) modes in
        # tmplmodes override earlier ones when a suffixed template exists
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer before the base class emits self.footer
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        # build the property map the template keywords expect
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # only write the header once per run of identical headers
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1584
1584
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (template, mapfile) pair where exactly one side (or neither)
    is set, mirroring the precedence: explicit template, explicit style,
    [ui] logtemplate, [ui] style.
    """
    # Explicit arguments win; only consult ui configuration when neither
    # a template spec nor a style was given.
    if not tmpl and not style:
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return templater.unquotestring(configured), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if tmpl:
        return formatter.lookuptemplate(ui, 'changeset', tmpl)

    if style:
        mapfile = style
        # a bare style name (no directory component) is resolved against
        # the installed template path, preferring the map-cmdline.* file
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    return None, None
1611
1611
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # --patch/--stat need a matcher to select which files get rendered
    needsfiles = opts.get('patch') or opts.get('stat')
    matchfn = scmutil.matchall(repo) if needsfiles else None

    # the JSON template bypasses the normal templater machinery entirely
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
    if tmpl or mapfile:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    return changeset_printer(ui, repo, matchfn, opts, buffered)
1637
1637
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    fm is a formatter; marker is an obsolescence marker object. When
    index is given it is printed as a leading column.
    """
    if index is not None:
        fm.write('index', '%i ', index)
    # precursor node, then any successor nodes (omitted when empty)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    succs = marker.succnodes()
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    # marker flags as uppercase hex
    fm.write('flag', '%X ', marker.flags())
    # parent nodes are optional metadata; None means "not recorded"
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata dict
    meta = marker.metadata().copy()
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1658
1658
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematcher = util.matchdate(date)
    matchall = scmutil.matchall(repo)
    found = {}

    def prep(ctx, fns):
        # record every revision whose commit timestamp satisfies the spec
        when = ctx.date()
        if datematcher(when[0]):
            found[ctx.rev()] = when

    # walkchangerevs yields tipmost-first here, so the first hit wins
    for ctx in walkchangerevs(repo, matchall, {'rev': None}, prep):
        rev = ctx.rev()
        if rev not in found:
            continue
        ui.status(_("found revision %s from %s\n") %
                  (rev, util.datestr(found[rev])))
        return str(rev)

    raise error.Abort(_("revision matching date not found"))
1679
1679
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield exponentially growing window sizes.

    Starting from windowsize, each yielded value doubles the previous one
    until the value reaches (or starts at/above) sizelimit, after which
    that final value is repeated forever.
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
1685
1685
class FileWalkError(Exception):
    """Raised when the file history cannot be walked via filelogs alone,
    signalling callers to fall back to the slow changelog-scanning path."""
1688
1688
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache (a dict) is filled with rev -> [filenames]
    entries for the revisions returned.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            # 'copied' carries the rename source when following renames
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) pairs for the matched files,
        # then for any rename sources discovered while walking
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1785
1785
class _followfilter(object):
    """Incrementally decide whether revisions are connected to a start
    revision for --follow walks.

    The first revision passed to match() becomes the anchor; subsequent
    calls track the set of graph 'roots' reached so far. NOTE(review):
    the forward/backward branches suggest match() expects revisions in a
    monotonic order relative to the anchor — confirm against callers.
    """

    def __init__(self, repo, onlyfirst=False):
        # onlyfirst: consider only the first parent of merges
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        """Return True if rev is reachable from (or reaches) startrev."""
        def realparents(rev):
            # parents of rev, restricted per onlyfirst and with nullrev
            # placeholders filtered out
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # the first revision seen anchors the walk and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            # rev matches if any of its parents is already a known root;
            # it then becomes a root itself
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            # rev matches if it is a pending root; its own parents then
            # replace it in the frontier
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1823
1823
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # slow path: pattern matching or --removed requires scanning changesets
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    # evaluate the revision at most once: drop it from the
                    # pending set and cache the matching filenames
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): builtin sets do not support subtracting a
                # list; this appears to rely on 'wanted' being a
                # smartset-like object here — verify for the slow/lazy paths
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather the next window of wanted revisions
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # call prepare() in forward (ascending) order over the window
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    # no cached filenames: lazily filter the changed files
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # yield contexts in the window's original (desired) order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1961
1961
def _makefollowlogfilematcher(repo, files, followfirst):
    """Return a rev -> matcher callable for --patch --follow FILE.

    The matcher for a revision matches the ancestor names of the
    requested files in that revision.
    """
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    # one-element list used as a mutable flag so the closure below can
    # mark the cache initialized (Python 2 has no 'nonlocal')
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        # map each ancestor revision of every requested file to the
        # path(s) the file had at that revision
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(c.rev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        # unknown revisions yield an empty (match-nothing) matcher
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1989
1989
1990 def _makenofollowlogfilematcher(repo, pats, opts):
1990 def _makenofollowlogfilematcher(repo, pats, opts):
1991 '''hook for extensions to override the filematcher for non-follow cases'''
1991 '''hook for extensions to override the filematcher for non-follow cases'''
1992 return None
1992 return None
1993
1993
1994 def _makelogrevset(repo, pats, opts, revs):
1994 def _makelogrevset(repo, pats, opts, revs):
1995 """Return (expr, filematcher) where expr is a revset string built
1995 """Return (expr, filematcher) where expr is a revset string built
1996 from log options and file patterns or None. If --stat or --patch
1996 from log options and file patterns or None. If --stat or --patch
1997 are not passed filematcher is None. Otherwise it is a callable
1997 are not passed filematcher is None. Otherwise it is a callable
1998 taking a revision number and returning a match objects filtering
1998 taking a revision number and returning a match objects filtering
1999 the files to be detailed when displaying the revision.
1999 the files to be detailed when displaying the revision.
2000 """
2000 """
2001 opt2revset = {
2001 opt2revset = {
2002 'no_merges': ('not merge()', None),
2002 'no_merges': ('not merge()', None),
2003 'only_merges': ('merge()', None),
2003 'only_merges': ('merge()', None),
2004 '_ancestors': ('ancestors(%(val)s)', None),
2004 '_ancestors': ('ancestors(%(val)s)', None),
2005 '_fancestors': ('_firstancestors(%(val)s)', None),
2005 '_fancestors': ('_firstancestors(%(val)s)', None),
2006 '_descendants': ('descendants(%(val)s)', None),
2006 '_descendants': ('descendants(%(val)s)', None),
2007 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2007 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2008 '_matchfiles': ('_matchfiles(%(val)s)', None),
2008 '_matchfiles': ('_matchfiles(%(val)s)', None),
2009 'date': ('date(%(val)r)', None),
2009 'date': ('date(%(val)r)', None),
2010 'branch': ('branch(%(val)r)', ' or '),
2010 'branch': ('branch(%(val)r)', ' or '),
2011 '_patslog': ('filelog(%(val)r)', ' or '),
2011 '_patslog': ('filelog(%(val)r)', ' or '),
2012 '_patsfollow': ('follow(%(val)r)', ' or '),
2012 '_patsfollow': ('follow(%(val)r)', ' or '),
2013 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2013 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2014 'keyword': ('keyword(%(val)r)', ' or '),
2014 'keyword': ('keyword(%(val)r)', ' or '),
2015 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2015 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2016 'user': ('user(%(val)r)', ' or '),
2016 'user': ('user(%(val)r)', ' or '),
2017 }
2017 }
2018
2018
2019 opts = dict(opts)
2019 opts = dict(opts)
2020 # follow or not follow?
2020 # follow or not follow?
2021 follow = opts.get('follow') or opts.get('follow_first')
2021 follow = opts.get('follow') or opts.get('follow_first')
2022 if opts.get('follow_first'):
2022 if opts.get('follow_first'):
2023 followfirst = 1
2023 followfirst = 1
2024 else:
2024 else:
2025 followfirst = 0
2025 followfirst = 0
2026 # --follow with FILE behavior depends on revs...
2026 # --follow with FILE behavior depends on revs...
2027 it = iter(revs)
2027 it = iter(revs)
2028 startrev = next(it)
2028 startrev = next(it)
2029 followdescendants = startrev < next(it, startrev)
2029 followdescendants = startrev < next(it, startrev)
2030
2030
2031 # branch and only_branch are really aliases and must be handled at
2031 # branch and only_branch are really aliases and must be handled at
2032 # the same time
2032 # the same time
2033 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2033 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2034 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2034 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2035 # pats/include/exclude are passed to match.match() directly in
2035 # pats/include/exclude are passed to match.match() directly in
2036 # _matchfiles() revset but walkchangerevs() builds its matcher with
2036 # _matchfiles() revset but walkchangerevs() builds its matcher with
2037 # scmutil.match(). The difference is input pats are globbed on
2037 # scmutil.match(). The difference is input pats are globbed on
2038 # platforms without shell expansion (windows).
2038 # platforms without shell expansion (windows).
2039 wctx = repo[None]
2039 wctx = repo[None]
2040 match, pats = scmutil.matchandpats(wctx, pats, opts)
2040 match, pats = scmutil.matchandpats(wctx, pats, opts)
2041 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2041 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2042 opts.get('removed'))
2042 opts.get('removed'))
2043 if not slowpath:
2043 if not slowpath:
2044 for f in match.files():
2044 for f in match.files():
2045 if follow and f not in wctx:
2045 if follow and f not in wctx:
2046 # If the file exists, it may be a directory, so let it
2046 # If the file exists, it may be a directory, so let it
2047 # take the slow path.
2047 # take the slow path.
2048 if os.path.exists(repo.wjoin(f)):
2048 if os.path.exists(repo.wjoin(f)):
2049 slowpath = True
2049 slowpath = True
2050 continue
2050 continue
2051 else:
2051 else:
2052 raise error.Abort(_('cannot follow file not in parent '
2052 raise error.Abort(_('cannot follow file not in parent '
2053 'revision: "%s"') % f)
2053 'revision: "%s"') % f)
2054 filelog = repo.file(f)
2054 filelog = repo.file(f)
2055 if not filelog:
2055 if not filelog:
2056 # A zero count may be a directory or deleted file, so
2056 # A zero count may be a directory or deleted file, so
2057 # try to find matching entries on the slow path.
2057 # try to find matching entries on the slow path.
2058 if follow:
2058 if follow:
2059 raise error.Abort(
2059 raise error.Abort(
2060 _('cannot follow nonexistent file: "%s"') % f)
2060 _('cannot follow nonexistent file: "%s"') % f)
2061 slowpath = True
2061 slowpath = True
2062
2062
2063 # We decided to fall back to the slowpath because at least one
2063 # We decided to fall back to the slowpath because at least one
2064 # of the paths was not a file. Check to see if at least one of them
2064 # of the paths was not a file. Check to see if at least one of them
2065 # existed in history - in that case, we'll continue down the
2065 # existed in history - in that case, we'll continue down the
2066 # slowpath; otherwise, we can turn off the slowpath
2066 # slowpath; otherwise, we can turn off the slowpath
2067 if slowpath:
2067 if slowpath:
2068 for path in match.files():
2068 for path in match.files():
2069 if path == '.' or path in repo.store:
2069 if path == '.' or path in repo.store:
2070 break
2070 break
2071 else:
2071 else:
2072 slowpath = False
2072 slowpath = False
2073
2073
2074 fpats = ('_patsfollow', '_patsfollowfirst')
2074 fpats = ('_patsfollow', '_patsfollowfirst')
2075 fnopats = (('_ancestors', '_fancestors'),
2075 fnopats = (('_ancestors', '_fancestors'),
2076 ('_descendants', '_fdescendants'))
2076 ('_descendants', '_fdescendants'))
2077 if slowpath:
2077 if slowpath:
2078 # See walkchangerevs() slow path.
2078 # See walkchangerevs() slow path.
2079 #
2079 #
2080 # pats/include/exclude cannot be represented as separate
2080 # pats/include/exclude cannot be represented as separate
2081 # revset expressions as their filtering logic applies at file
2081 # revset expressions as their filtering logic applies at file
2082 # level. For instance "-I a -X a" matches a revision touching
2082 # level. For instance "-I a -X a" matches a revision touching
2083 # "a" and "b" while "file(a) and not file(b)" does
2083 # "a" and "b" while "file(a) and not file(b)" does
2084 # not. Besides, filesets are evaluated against the working
2084 # not. Besides, filesets are evaluated against the working
2085 # directory.
2085 # directory.
2086 matchargs = ['r:', 'd:relpath']
2086 matchargs = ['r:', 'd:relpath']
2087 for p in pats:
2087 for p in pats:
2088 matchargs.append('p:' + p)
2088 matchargs.append('p:' + p)
2089 for p in opts.get('include', []):
2089 for p in opts.get('include', []):
2090 matchargs.append('i:' + p)
2090 matchargs.append('i:' + p)
2091 for p in opts.get('exclude', []):
2091 for p in opts.get('exclude', []):
2092 matchargs.append('x:' + p)
2092 matchargs.append('x:' + p)
2093 matchargs = ','.join(('%r' % p) for p in matchargs)
2093 matchargs = ','.join(('%r' % p) for p in matchargs)
2094 opts['_matchfiles'] = matchargs
2094 opts['_matchfiles'] = matchargs
2095 if follow:
2095 if follow:
2096 opts[fnopats[0][followfirst]] = '.'
2096 opts[fnopats[0][followfirst]] = '.'
2097 else:
2097 else:
2098 if follow:
2098 if follow:
2099 if pats:
2099 if pats:
2100 # follow() revset interprets its file argument as a
2100 # follow() revset interprets its file argument as a
2101 # manifest entry, so use match.files(), not pats.
2101 # manifest entry, so use match.files(), not pats.
2102 opts[fpats[followfirst]] = list(match.files())
2102 opts[fpats[followfirst]] = list(match.files())
2103 else:
2103 else:
2104 op = fnopats[followdescendants][followfirst]
2104 op = fnopats[followdescendants][followfirst]
2105 opts[op] = 'rev(%d)' % startrev
2105 opts[op] = 'rev(%d)' % startrev
2106 else:
2106 else:
2107 opts['_patslog'] = list(pats)
2107 opts['_patslog'] = list(pats)
2108
2108
2109 filematcher = None
2109 filematcher = None
2110 if opts.get('patch') or opts.get('stat'):
2110 if opts.get('patch') or opts.get('stat'):
2111 # When following files, track renames via a special matcher.
2111 # When following files, track renames via a special matcher.
2112 # If we're forced to take the slowpath it means we're following
2112 # If we're forced to take the slowpath it means we're following
2113 # at least one pattern/directory, so don't bother with rename tracking.
2113 # at least one pattern/directory, so don't bother with rename tracking.
2114 if follow and not match.always() and not slowpath:
2114 if follow and not match.always() and not slowpath:
2115 # _makefollowlogfilematcher expects its files argument to be
2115 # _makefollowlogfilematcher expects its files argument to be
2116 # relative to the repo root, so use match.files(), not pats.
2116 # relative to the repo root, so use match.files(), not pats.
2117 filematcher = _makefollowlogfilematcher(repo, match.files(),
2117 filematcher = _makefollowlogfilematcher(repo, match.files(),
2118 followfirst)
2118 followfirst)
2119 else:
2119 else:
2120 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2120 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2121 if filematcher is None:
2121 if filematcher is None:
2122 filematcher = lambda rev: match
2122 filematcher = lambda rev: match
2123
2123
2124 expr = []
2124 expr = []
2125 for op, val in sorted(opts.iteritems()):
2125 for op, val in sorted(opts.iteritems()):
2126 if not val:
2126 if not val:
2127 continue
2127 continue
2128 if op not in opt2revset:
2128 if op not in opt2revset:
2129 continue
2129 continue
2130 revop, andor = opt2revset[op]
2130 revop, andor = opt2revset[op]
2131 if '%(val)' not in revop:
2131 if '%(val)' not in revop:
2132 expr.append(revop)
2132 expr.append(revop)
2133 else:
2133 else:
2134 if not isinstance(val, list):
2134 if not isinstance(val, list):
2135 e = revop % {'val': val}
2135 e = revop % {'val': val}
2136 else:
2136 else:
2137 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2137 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2138 expr.append(e)
2138 expr.append(e)
2139
2139
2140 if expr:
2140 if expr:
2141 expr = '(' + ' and '.join(expr) + ')'
2141 expr = '(' + ' and '.join(expr) + ')'
2142 else:
2142 else:
2143 expr = None
2143 expr = None
2144 return expr, filematcher
2144 return expr, filematcher
2145
2145
def _logrevs(repo, opts):
    """Resolve the default revision range for a log-style command.

    The default --rev value depends on --follow, while --follow behavior
    depends on the revisions resolved from --rev, hence this helper.
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        # An explicit --rev always wins over any --follow default.
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # Following from the null revision yields nothing.
            return revset.baseset()
        return repo.revs('reverse(:.)')
    result = revset.spanset(repo)
    result.reverse()
    return result
2160
2160
def getgraphlogrevs(repo, pats, opts):
    """Resolve log options and file patterns for graph display.

    Returns a (revs, expr, filematcher) tuple: 'revs' is an iterable of
    revision numbers, 'expr' is a revset string built from the log
    options and file patterns (or None) that was used to filter 'revs',
    and 'filematcher' is None unless --stat or --patch was passed, in
    which case it is a callable taking a revision number and returning a
    match object selecting the files to detail for that revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev') and not (revs.isdescending() or revs.istopo()):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs.
        revs.sort(reverse=True)
    if expr:
        revsetmatcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = revsetmatcher(repo, revs)
    if maxcount is not None:
        # Keep only the first 'maxcount' revisions (--limit).
        firstrevs = []
        for seen, rev in enumerate(revs):
            if seen >= maxcount:
                break
            firstrevs.append(rev)
        revs = revset.baseset(firstrevs)

    return revs, expr, filematcher
2191
2191
def getlogrevs(repo, pats, opts):
    """Resolve log options and file patterns for plain (non-graph) log.

    Returns a (revs, expr, filematcher) tuple: 'revs' is an iterable of
    revision numbers, 'expr' is a revset string built from the log
    options and file patterns (or None) that was used to filter 'revs',
    and 'filematcher' is None unless --stat or --patch was passed, in
    which case it is a callable taking a revision number and returning a
    match object selecting the files to detail for that revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        revsetmatcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = revsetmatcher(repo, revs)
    if maxcount is not None:
        # Keep only the first 'maxcount' revisions (--limit).
        firstrevs = []
        for seen, rev in enumerate(revs):
            if seen >= maxcount:
                break
            firstrevs.append(rev)
        revs = revset.baseset(firstrevs)

    return revs, expr, filematcher
2217
2217
def _graphnodeformatter(ui, displayer):
    """Return a (repo, ctx) -> string callable rendering the graph node.

    Honors the ui.graphnodetemplate config; falls back to the built-in
    {graphnode} keyword when no template is configured.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        # fast path for "{graphnode}"
        return templatekw.showgraphnode

    tmpl = formatter.gettemplater(ui, 'graphnode', spec)
    if isinstance(displayer, changeset_templater):
        # reuse cache of slow templates
        cache = displayer.cache
    else:
        cache = {}
    props = templatekw.keywords.copy()
    props['templ'] = tmpl
    props['cache'] = cache

    def formatnode(repo, ctx):
        # Refresh the per-call template properties before rendering.
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templater.stringify(tmpl('graphnode', **props))

    return formatnode
2237
2237
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the revisions yielded by *dag* as an ASCII graph.

    'edgefn' maps each node's display lines to graph edges, and
    'getrenamed', when given, is used to annotate copies/renames.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        for stylekey in list(styles):
            styles[stylekey] = '|'
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            configured = ui.config('experimental', 'graphstyle.%s' % name,
                                   styles[key])
            # an empty style means "draw no edge character" -> None
            styles[key] = configured or None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copylist = None
        if getrenamed and ctx.rev():
            copylist = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copylist.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copylist, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            # drop the trailing empty string from a final newline
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for edgetype, edgechar, edgelines, coldata in edges:
            graphmod.ascii(ui, state, edgetype, edgechar, edgelines, coldata)
    displayer.close()
2285
2285
def graphlog(ui, repo, *pats, **opts):
    """Run log with the revision DAG rendered as an ASCII graph.

    Parameters are identical to the log command's.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # Bound rename detection by the highest requested revision, if any.
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2300
2300
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was supplied."""
    for flag in ["newest_first"]:
        if opts.get(flag):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % flag.replace("_", "-"))
2306
2306
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, honor --limit, and wrap them for graphing.

    NOTE(review): reverses the caller's list in place, matching the
    historical behavior of this helper.
    """
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2313
2313
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition.

    Recurses into subrepositories and returns the list of paths that
    could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or (not explicitonly and f not in wctx
                     and repo.wvfs.lexists(f)):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            # With --subrepos, descend non-explicitly (explicitonly=False);
            # otherwise only the sub's explicit matches are added.
            subexplicit = not opts.get('subrepos')
            bad.extend(sub.add(ui, submatch, prefix, subexplicit, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2356
2356
def forget(ui, repo, match, prefix, explicitonly):
    """Unschedule matched files from tracking.

    Recurses into subrepositories and returns a (bad, forgot) pair of
    path lists: files that could not be forgotten and files that were.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # modified + added + deleted + clean
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            # Only complain about explicit files that are neither tracked
            # nor directories nor already handled above.
            if f in repo.dirstate or repo.wvfs.isdir(f) or f in forgot:
                continue
            if repo.wvfs.exists(f):
                # Don't complain if the exact case match wasn't given.
                # But don't do this until after checking 'forgot', so
                # that subrepo files aren't normalized, and this op is
                # purely from data cached by the status walk above.
                if repo.dirstate.normalize(f) in repo.dirstate:
                    continue
                ui.warn(_('not removing %s: '
                          'file is already untracked\n')
                        % match.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2404
2404
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of *ctx* matching *m* through formatter *fm*.

    Returns 0 when at least one file was listed (here or in a
    subrepository), 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for fname in ctx.matches(m):
        # In the working directory, skip files marked as removed.
        if rev is None and ds[fname] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[fname]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=fname)
        fm.write('path', fmt, m.rel(fname))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            recurse = m.exact(subpath) or subrepos
            if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                ret = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % m.abs(subpath))

    return ret
2434
2434
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Schedule matched files for removal, recursing into subrepos.

    'after' records deletions already made on disk; 'force' removes even
    modified/added files. Returns 0 on success, 1 if any file could not
    be handled. When 'warnings' is supplied (e.g. by a recursing subrepo
    call), messages are appended to it instead of being printed here.

    Improvements over the previous version: the local 'list' no longer
    shadows the builtin, the per-file 'insubrepo()' closure is replaced
    by a direct any() test, and manual counters use enumerate().
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    for count, subpath in enumerate(subs, 1):
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    for count, f in enumerate(files, 1):
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        # Files inside a subrepository are reported by its own removal above.
        insubrepo = any(f.startswith(subpath + '/')
                        for subpath in wctx.substate)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1
    ui.progress(_('deleting'), None)

    if force:
        removelist = modified + deleted + clean + added
    elif after:
        removelist = deleted
        remaining = modified + added + clean
        total = len(remaining)
        for count, f in enumerate(remaining, 1):
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        removelist = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    removelist = sorted(removelist)
    total = len(removelist)
    for count, f in enumerate(removelist, 1):
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in removelist:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(removelist)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2552
2552
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the data of the files in ``ctx`` selected by ``matcher``.

    Returns 0 when at least one file (or subrepo file) was written,
    1 otherwise.
    """
    err = 1

    def dumpfile(path):
        # Open the output target (file template or stdout) and emit the
        # file data, optionally running the decode filters.
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        target = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(target)[0]:
                dumpfile(target)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        dumpfile(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2595
2595
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts):
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2612
2612
def samefile(f, ctx1, ctx2):
    """Return True if file ``f`` is identical in ``ctx1`` and ``ctx2``.

    Identical means same content and same flags.  A file absent from
    both manifests counts as "the same"; a file present in only one of
    them does not.
    """
    if f not in ctx1.manifest():
        # not in ctx1: only "the same" if ctx2 lacks it too
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2624
2624
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Fold the working-directory changes into changeset ``old``.

    Makes a temporary commit of any working-directory changes, then
    builds a single new changeset on top of ``old``'s first parent that
    combines ``old`` with those changes, commits it, and finally either
    obsoletes or strips the replaced changesets.  Returns the node of
    the resulting changeset (``old.node()`` when nothing changed).
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                # deactivate the bookmark so the temporary commit does not
                # move it
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # BUG FIX: the original tested the bound method ``old.p2``
                # (always truthy) instead of calling it, so the second-parent
                # copies were computed even for non-merge changesets.  Only
                # consult the second parent when one actually exists.
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    # serve content/flags/copy-source from the intermediate
                    # commit; returning None marks the file as deleted
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                # always restore the original new-commit phase setting
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
            #commit the whole amend process
            if createmarkers:
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit (must happen after the transaction above has closed)
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2807
2807
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, launching the editor only when empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2813
2813
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Run the user's editor and return the resulting commit message.

    Aborts on an empty message, or — with unchangedmessagedetection —
    when the editor output is identical to the generated template.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Look up the most specific [committemplate] entry for the edit form,
    # dropping trailing components until one matches.
    forms = ['changeset'] + [e for e in editform.split('.') if e]
    templatetext = None
    committext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    if committext is None:
        # no template configured: build the classic editor text
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    prevdir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    # strip the HG: helper lines from whatever the user saved
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(prevdir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2854
2854
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-message template ``tmpl`` for ``ctx``."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # every other [committemplate] setting becomes a named sub-template
    for key, value in ui.configitems('committemplate'):
        if key != 'changeset':
            t.t.cache[key] = value

    # ensure that extramsg is a string
    extramsg = extramsg or ''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2871
2871
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with "HG: " and rejoin."""
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
2874
2874
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default (non-template) commit-editor text for ctx."""
    lines = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("") # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for s in subs:
        lines.append(hgprefix(_("subrepo %s") % s))
    for f in added:
        lines.append(hgprefix(_("added %s") % f))
    for f in modified:
        lines.append(hgprefix(_("changed %s") % f))
    for f in removed:
        lines.append(hgprefix(_("removed %s") % f))
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2902
2902
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Emit informational messages after a commit.

    Prints 'created new head' when the new changeset adds a head to its
    branch, notes any reopened closed branch head, and echoes the
    committed changeset id in verbose/debug mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if (not opts.get('amend') and bheads and node not in bheads
        and not any(p.node() in bheads and p.branch() == branch
                    for p in parents)):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2950
2950
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status for the files matched by pats."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2953
2953
2954 def revert(ui, repo, ctx, parents, *pats, **opts):
2954 def revert(ui, repo, ctx, parents, *pats, **opts):
2955 parent, p2 = parents
2955 parent, p2 = parents
2956 node = ctx.node()
2956 node = ctx.node()
2957
2957
2958 mf = ctx.manifest()
2958 mf = ctx.manifest()
2959 if node == p2:
2959 if node == p2:
2960 parent = p2
2960 parent = p2
2961
2961
2962 # need all matching names in dirstate and manifest of target rev,
2962 # need all matching names in dirstate and manifest of target rev,
2963 # so have to walk both. do not print errors if files exist in one
2963 # so have to walk both. do not print errors if files exist in one
2964 # but not other. in both cases, filesets should be evaluated against
2964 # but not other. in both cases, filesets should be evaluated against
2965 # workingctx to get consistent result (issue4497). this means 'set:**'
2965 # workingctx to get consistent result (issue4497). this means 'set:**'
2966 # cannot be used to select missing files from target rev.
2966 # cannot be used to select missing files from target rev.
2967
2967
2968 # `names` is a mapping for all elements in working copy and target revision
2968 # `names` is a mapping for all elements in working copy and target revision
2969 # The mapping is in the form:
2969 # The mapping is in the form:
2970 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2970 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2971 names = {}
2971 names = {}
2972
2972
2973 with repo.wlock():
2973 with repo.wlock():
2974 ## filling of the `names` mapping
2974 ## filling of the `names` mapping
2975 # walk dirstate to fill `names`
2975 # walk dirstate to fill `names`
2976
2976
2977 interactive = opts.get('interactive', False)
2977 interactive = opts.get('interactive', False)
2978 wctx = repo[None]
2978 wctx = repo[None]
2979 m = scmutil.match(wctx, pats, opts)
2979 m = scmutil.match(wctx, pats, opts)
2980
2980
2981 # we'll need this later
2981 # we'll need this later
2982 targetsubs = sorted(s for s in wctx.substate if m(s))
2982 targetsubs = sorted(s for s in wctx.substate if m(s))
2983
2983
2984 if not m.always():
2984 if not m.always():
2985 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2985 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2986 names[abs] = m.rel(abs), m.exact(abs)
2986 names[abs] = m.rel(abs), m.exact(abs)
2987
2987
2988 # walk target manifest to fill `names`
2988 # walk target manifest to fill `names`
2989
2989
2990 def badfn(path, msg):
2990 def badfn(path, msg):
2991 if path in names:
2991 if path in names:
2992 return
2992 return
2993 if path in ctx.substate:
2993 if path in ctx.substate:
2994 return
2994 return
2995 path_ = path + '/'
2995 path_ = path + '/'
2996 for f in names:
2996 for f in names:
2997 if f.startswith(path_):
2997 if f.startswith(path_):
2998 return
2998 return
2999 ui.warn("%s: %s\n" % (m.rel(path), msg))
2999 ui.warn("%s: %s\n" % (m.rel(path), msg))
3000
3000
3001 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3001 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3002 if abs not in names:
3002 if abs not in names:
3003 names[abs] = m.rel(abs), m.exact(abs)
3003 names[abs] = m.rel(abs), m.exact(abs)
3004
3004
3005 # Find status of all file in `names`.
3005 # Find status of all file in `names`.
3006 m = scmutil.matchfiles(repo, names)
3006 m = scmutil.matchfiles(repo, names)
3007
3007
3008 changes = repo.status(node1=node, match=m,
3008 changes = repo.status(node1=node, match=m,
3009 unknown=True, ignored=True, clean=True)
3009 unknown=True, ignored=True, clean=True)
3010 else:
3010 else:
3011 changes = repo.status(node1=node, match=m)
3011 changes = repo.status(node1=node, match=m)
3012 for kind in changes:
3012 for kind in changes:
3013 for abs in kind:
3013 for abs in kind:
3014 names[abs] = m.rel(abs), m.exact(abs)
3014 names[abs] = m.rel(abs), m.exact(abs)
3015
3015
3016 m = scmutil.matchfiles(repo, names)
3016 m = scmutil.matchfiles(repo, names)
3017
3017
3018 modified = set(changes.modified)
3018 modified = set(changes.modified)
3019 added = set(changes.added)
3019 added = set(changes.added)
3020 removed = set(changes.removed)
3020 removed = set(changes.removed)
3021 _deleted = set(changes.deleted)
3021 _deleted = set(changes.deleted)
3022 unknown = set(changes.unknown)
3022 unknown = set(changes.unknown)
3023 unknown.update(changes.ignored)
3023 unknown.update(changes.ignored)
3024 clean = set(changes.clean)
3024 clean = set(changes.clean)
3025 modadded = set()
3025 modadded = set()
3026
3026
3027 # split between files known in target manifest and the others
3027 # split between files known in target manifest and the others
3028 smf = set(mf)
3028 smf = set(mf)
3029
3029
3030 # determine the exact nature of the deleted changesets
3030 # determine the exact nature of the deleted changesets
3031 deladded = _deleted - smf
3031 deladded = _deleted - smf
3032 deleted = _deleted - deladded
3032 deleted = _deleted - deladded
3033
3033
3034 # We need to account for the state of the file in the dirstate,
3034 # We need to account for the state of the file in the dirstate,
3035 # even when we revert against something else than parent. This will
3035 # even when we revert against something else than parent. This will
3036 # slightly alter the behavior of revert (doing back up or not, delete
3036 # slightly alter the behavior of revert (doing back up or not, delete
3037 # or just forget etc).
3037 # or just forget etc).
3038 if parent == node:
3038 if parent == node:
3039 dsmodified = modified
3039 dsmodified = modified
3040 dsadded = added
3040 dsadded = added
3041 dsremoved = removed
3041 dsremoved = removed
3042 # store all local modifications, useful later for rename detection
3042 # store all local modifications, useful later for rename detection
3043 localchanges = dsmodified | dsadded
3043 localchanges = dsmodified | dsadded
3044 modified, added, removed = set(), set(), set()
3044 modified, added, removed = set(), set(), set()
3045 else:
3045 else:
3046 changes = repo.status(node1=parent, match=m)
3046 changes = repo.status(node1=parent, match=m)
3047 dsmodified = set(changes.modified)
3047 dsmodified = set(changes.modified)
3048 dsadded = set(changes.added)
3048 dsadded = set(changes.added)
3049 dsremoved = set(changes.removed)
3049 dsremoved = set(changes.removed)
3050 # store all local modifications, useful later for rename detection
3050 # store all local modifications, useful later for rename detection
3051 localchanges = dsmodified | dsadded
3051 localchanges = dsmodified | dsadded
3052
3052
3053 # only take into account for removes between wc and target
3053 # only take into account for removes between wc and target
3054 clean |= dsremoved - removed
3054 clean |= dsremoved - removed
3055 dsremoved &= removed
3055 dsremoved &= removed
3056 # distinct between dirstate remove and other
3056 # distinct between dirstate remove and other
3057 removed -= dsremoved
3057 removed -= dsremoved
3058
3058
3059 modadded = added & dsmodified
3059 modadded = added & dsmodified
3060 added -= modadded
3060 added -= modadded
3061
3061
3062 # tell newly modified apart.
3062 # tell newly modified apart.
3063 dsmodified &= modified
3063 dsmodified &= modified
3064 dsmodified |= modified & dsadded # dirstate added may need backup
3064 dsmodified |= modified & dsadded # dirstate added may need backup
3065 modified -= dsmodified
3065 modified -= dsmodified
3066
3066
3067 # We need to wait for some post-processing to update this set
3067 # We need to wait for some post-processing to update this set
3068 # before making the distinction. The dirstate will be used for
3068 # before making the distinction. The dirstate will be used for
3069 # that purpose.
3069 # that purpose.
3070 dsadded = added
3070 dsadded = added
3071
3071
3072 # in case of merge, files that are actually added can be reported as
3072 # in case of merge, files that are actually added can be reported as
3073 # modified, we need to post process the result
3073 # modified, we need to post process the result
3074 if p2 != nullid:
3074 if p2 != nullid:
3075 mergeadd = dsmodified - smf
3075 mergeadd = dsmodified - smf
3076 dsadded |= mergeadd
3076 dsadded |= mergeadd
3077 dsmodified -= mergeadd
3077 dsmodified -= mergeadd
3078
3078
3079 # if f is a rename, update `names` to also revert the source
3079 # if f is a rename, update `names` to also revert the source
3080 cwd = repo.getcwd()
3080 cwd = repo.getcwd()
3081 for f in localchanges:
3081 for f in localchanges:
3082 src = repo.dirstate.copied(f)
3082 src = repo.dirstate.copied(f)
3083 # XXX should we check for rename down to target node?
3083 # XXX should we check for rename down to target node?
3084 if src and src not in names and repo.dirstate[src] == 'r':
3084 if src and src not in names and repo.dirstate[src] == 'r':
3085 dsremoved.add(src)
3085 dsremoved.add(src)
3086 names[src] = (repo.pathto(src, cwd), True)
3086 names[src] = (repo.pathto(src, cwd), True)
3087
3087
3088 # distinguish between file to forget and the other
3088 # distinguish between file to forget and the other
3089 added = set()
3089 added = set()
3090 for abs in dsadded:
3090 for abs in dsadded:
3091 if repo.dirstate[abs] != 'a':
3091 if repo.dirstate[abs] != 'a':
3092 added.add(abs)
3092 added.add(abs)
3093 dsadded -= added
3093 dsadded -= added
3094
3094
3095 for abs in deladded:
3095 for abs in deladded:
3096 if repo.dirstate[abs] == 'a':
3096 if repo.dirstate[abs] == 'a':
3097 dsadded.add(abs)
3097 dsadded.add(abs)
3098 deladded -= dsadded
3098 deladded -= dsadded
3099
3099
3100 # For files marked as removed, we check if an unknown file is present at
3100 # For files marked as removed, we check if an unknown file is present at
3101 # the same path. If a such file exists it may need to be backed up.
3101 # the same path. If a such file exists it may need to be backed up.
3102 # Making the distinction at this stage helps have simpler backup
3102 # Making the distinction at this stage helps have simpler backup
3103 # logic.
3103 # logic.
3104 removunk = set()
3104 removunk = set()
3105 for abs in removed:
3105 for abs in removed:
3106 target = repo.wjoin(abs)
3106 target = repo.wjoin(abs)
3107 if os.path.lexists(target):
3107 if os.path.lexists(target):
3108 removunk.add(abs)
3108 removunk.add(abs)
3109 removed -= removunk
3109 removed -= removunk
3110
3110
3111 dsremovunk = set()
3111 dsremovunk = set()
3112 for abs in dsremoved:
3112 for abs in dsremoved:
3113 target = repo.wjoin(abs)
3113 target = repo.wjoin(abs)
3114 if os.path.lexists(target):
3114 if os.path.lexists(target):
3115 dsremovunk.add(abs)
3115 dsremovunk.add(abs)
3116 dsremoved -= dsremovunk
3116 dsremoved -= dsremovunk
3117
3117
3118 # action to be actually performed by revert
3118 # action to be actually performed by revert
3119 # (<list of file>, message>) tuple
3119 # (<list of file>, message>) tuple
3120 actions = {'revert': ([], _('reverting %s\n')),
3120 actions = {'revert': ([], _('reverting %s\n')),
3121 'add': ([], _('adding %s\n')),
3121 'add': ([], _('adding %s\n')),
3122 'remove': ([], _('removing %s\n')),
3122 'remove': ([], _('removing %s\n')),
3123 'drop': ([], _('removing %s\n')),
3123 'drop': ([], _('removing %s\n')),
3124 'forget': ([], _('forgetting %s\n')),
3124 'forget': ([], _('forgetting %s\n')),
3125 'undelete': ([], _('undeleting %s\n')),
3125 'undelete': ([], _('undeleting %s\n')),
3126 'noop': (None, _('no changes needed to %s\n')),
3126 'noop': (None, _('no changes needed to %s\n')),
3127 'unknown': (None, _('file not managed: %s\n')),
3127 'unknown': (None, _('file not managed: %s\n')),
3128 }
3128 }
3129
3129
3130 # "constant" that convey the backup strategy.
3130 # "constant" that convey the backup strategy.
3131 # All set to `discard` if `no-backup` is set do avoid checking
3131 # All set to `discard` if `no-backup` is set do avoid checking
3132 # no_backup lower in the code.
3132 # no_backup lower in the code.
3133 # These values are ordered for comparison purposes
3133 # These values are ordered for comparison purposes
3134 backupinteractive = 3 # do backup if interactively modified
3134 backupinteractive = 3 # do backup if interactively modified
3135 backup = 2 # unconditionally do backup
3135 backup = 2 # unconditionally do backup
3136 check = 1 # check if the existing file differs from target
3136 check = 1 # check if the existing file differs from target
3137 discard = 0 # never do backup
3137 discard = 0 # never do backup
3138 if opts.get('no_backup'):
3138 if opts.get('no_backup'):
3139 backupinteractive = backup = check = discard
3139 backupinteractive = backup = check = discard
3140 if interactive:
3140 if interactive:
3141 dsmodifiedbackup = backupinteractive
3141 dsmodifiedbackup = backupinteractive
3142 else:
3142 else:
3143 dsmodifiedbackup = backup
3143 dsmodifiedbackup = backup
3144 tobackup = set()
3144 tobackup = set()
3145
3145
3146 backupanddel = actions['remove']
3146 backupanddel = actions['remove']
3147 if not opts.get('no_backup'):
3147 if not opts.get('no_backup'):
3148 backupanddel = actions['drop']
3148 backupanddel = actions['drop']
3149
3149
3150 disptable = (
3150 disptable = (
3151 # dispatch table:
3151 # dispatch table:
3152 # file state
3152 # file state
3153 # action
3153 # action
3154 # make backup
3154 # make backup
3155
3155
3156 ## Sets that results that will change file on disk
3156 ## Sets that results that will change file on disk
3157 # Modified compared to target, no local change
3157 # Modified compared to target, no local change
3158 (modified, actions['revert'], discard),
3158 (modified, actions['revert'], discard),
3159 # Modified compared to target, but local file is deleted
3159 # Modified compared to target, but local file is deleted
3160 (deleted, actions['revert'], discard),
3160 (deleted, actions['revert'], discard),
3161 # Modified compared to target, local change
3161 # Modified compared to target, local change
3162 (dsmodified, actions['revert'], dsmodifiedbackup),
3162 (dsmodified, actions['revert'], dsmodifiedbackup),
3163 # Added since target
3163 # Added since target
3164 (added, actions['remove'], discard),
3164 (added, actions['remove'], discard),
3165 # Added in working directory
3165 # Added in working directory
3166 (dsadded, actions['forget'], discard),
3166 (dsadded, actions['forget'], discard),
3167 # Added since target, have local modification
3167 # Added since target, have local modification
3168 (modadded, backupanddel, backup),
3168 (modadded, backupanddel, backup),
3169 # Added since target but file is missing in working directory
3169 # Added since target but file is missing in working directory
3170 (deladded, actions['drop'], discard),
3170 (deladded, actions['drop'], discard),
3171 # Removed since target, before working copy parent
3171 # Removed since target, before working copy parent
3172 (removed, actions['add'], discard),
3172 (removed, actions['add'], discard),
3173 # Same as `removed` but an unknown file exists at the same path
3173 # Same as `removed` but an unknown file exists at the same path
3174 (removunk, actions['add'], check),
3174 (removunk, actions['add'], check),
3175 # Removed since targe, marked as such in working copy parent
3175 # Removed since targe, marked as such in working copy parent
3176 (dsremoved, actions['undelete'], discard),
3176 (dsremoved, actions['undelete'], discard),
3177 # Same as `dsremoved` but an unknown file exists at the same path
3177 # Same as `dsremoved` but an unknown file exists at the same path
3178 (dsremovunk, actions['undelete'], check),
3178 (dsremovunk, actions['undelete'], check),
3179 ## the following sets does not result in any file changes
3179 ## the following sets does not result in any file changes
3180 # File with no modification
3180 # File with no modification
3181 (clean, actions['noop'], discard),
3181 (clean, actions['noop'], discard),
3182 # Existing file, not tracked anywhere
3182 # Existing file, not tracked anywhere
3183 (unknown, actions['unknown'], discard),
3183 (unknown, actions['unknown'], discard),
3184 )
3184 )
3185
3185
3186 for abs, (rel, exact) in sorted(names.items()):
3186 for abs, (rel, exact) in sorted(names.items()):
3187 # target file to be touch on disk (relative to cwd)
3187 # target file to be touch on disk (relative to cwd)
3188 target = repo.wjoin(abs)
3188 target = repo.wjoin(abs)
3189 # search the entry in the dispatch table.
3189 # search the entry in the dispatch table.
3190 # if the file is in any of these sets, it was touched in the working
3190 # if the file is in any of these sets, it was touched in the working
3191 # directory parent and we are sure it needs to be reverted.
3191 # directory parent and we are sure it needs to be reverted.
3192 for table, (xlist, msg), dobackup in disptable:
3192 for table, (xlist, msg), dobackup in disptable:
3193 if abs not in table:
3193 if abs not in table:
3194 continue
3194 continue
3195 if xlist is not None:
3195 if xlist is not None:
3196 xlist.append(abs)
3196 xlist.append(abs)
3197 if dobackup:
3197 if dobackup:
3198 # If in interactive mode, don't automatically create
3198 # If in interactive mode, don't automatically create
3199 # .orig files (issue4793)
3199 # .orig files (issue4793)
3200 if dobackup == backupinteractive:
3200 if dobackup == backupinteractive:
3201 tobackup.add(abs)
3201 tobackup.add(abs)
3202 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3202 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3203 bakname = scmutil.origpath(ui, repo, rel)
3203 bakname = scmutil.origpath(ui, repo, rel)
3204 ui.note(_('saving current version of %s as %s\n') %
3204 ui.note(_('saving current version of %s as %s\n') %
3205 (rel, bakname))
3205 (rel, bakname))
3206 if not opts.get('dry_run'):
3206 if not opts.get('dry_run'):
3207 if interactive:
3207 if interactive:
3208 util.copyfile(target, bakname)
3208 util.copyfile(target, bakname)
3209 else:
3209 else:
3210 util.rename(target, bakname)
3210 util.rename(target, bakname)
3211 if ui.verbose or not exact:
3211 if ui.verbose or not exact:
3212 if not isinstance(msg, basestring):
3212 if not isinstance(msg, basestring):
3213 msg = msg(abs)
3213 msg = msg(abs)
3214 ui.status(msg % rel)
3214 ui.status(msg % rel)
3215 elif exact:
3215 elif exact:
3216 ui.warn(msg % rel)
3216 ui.warn(msg % rel)
3217 break
3217 break
3218
3218
3219 if not opts.get('dry_run'):
3219 if not opts.get('dry_run'):
3220 needdata = ('revert', 'add', 'undelete')
3220 needdata = ('revert', 'add', 'undelete')
3221 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3221 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3222 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3222 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3223
3223
3224 if targetsubs:
3224 if targetsubs:
3225 # Revert the subrepos on the revert list
3225 # Revert the subrepos on the revert list
3226 for sub in targetsubs:
3226 for sub in targetsubs:
3227 try:
3227 try:
3228 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3228 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3229 except KeyError:
3229 except KeyError:
3230 raise error.Abort("subrepository '%s' does not exist in %s!"
3230 raise error.Abort("subrepository '%s' does not exist in %s!"
3231 % (sub, short(ctx.node())))
3231 % (sub, short(ctx.node())))
3232
3232
3233 def _revertprefetch(repo, ctx, *files):
3233 def _revertprefetch(repo, ctx, *files):
3234 """Let extension changing the storage layer prefetch content"""
3234 """Let extension changing the storage layer prefetch content"""
3235 pass
3235 pass
3236
3236
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    - parents: the (parent, p2) node pair of the working directory
    - ctx: the context being reverted to
    - actions: maps an action name ('revert', 'add', 'remove', 'drop',
      'forget', 'undelete', ...) to a (file list, message) pair
    - interactive: prompt the user before forgetting files and for each
      hunk to revert
    - tobackup: set of files that must be backed up before an interactive
      revert overwrites them
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    # files the user declines to forget are appended to excluded_files,
    # which the interactive matcher built below excludes
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision of f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # the file may already be gone; updating the dirstate suffices
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        # stop tracking the file but leave it on disk
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        # with reversehunks the diff is computed in the opposite direction
        # and selected hunks are reversed before being applied below
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)
        operation = 'discard' if node == parent else 'revert'

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    # undeleted files are marked clean only when reverting to the sole parent
    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # restore copy/rename records for every file we (re)created
    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3363
3363
def command(table):
    """Return a decorator factory that registers commands in *table*.

    *table* must be a dict mapping command names to entries.  Typical use::

        command = cmdutil.command(commandtable)

        @command('mycommand', options, synopsis)
        def mycommand(ui, repo, ...):
            ...

    Arguments accepted by the returned ``cmd`` callable:

    - name: the command name to register
    - options: iterable of option tuples; see
      ``mercurial.fancyopts.fancyopts()`` for the tuple format
    - synopsis: optional one-line usage summary shown in help output
    - norepo: the command does not require a local repository
      (default False, as most commands operate on a repository)
    - optionalrepo: the command optionally uses a local repository
    - inferrepo: examine the command line arguments for potential
      repository locations (see ``findrepo()``) and use one if found

    The table entry is ``(func, options)``, with the synopsis appended
    when given; the repository flags are stored as attributes on *func*.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # repository requirements travel on the function object itself
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator
    return cmd
3408
3408
def checkunresolved(ms):
    """Abort when the merge state *ms* still contains conflicts.

    Raises error.Abort if any file is unresolved, or if the merge driver
    state is not 's' or driver-resolved files remain.
    """
    unresolved = list(ms.unresolved())
    if unresolved:
        raise error.Abort(_("unresolved merge conflicts "
                            "(see 'hg help resolve')"))
    # short-circuit: only query driver-resolved files when the driver
    # state itself looks settled
    if ms.mdstate() != 's' or list(ms.driverresolved()):
        raise error.Abort(_('driver-resolved merge conflicts'),
                          hint=_('run "hg resolve --all" to resolve'))
3416
3416
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished() and clearunfinished() below
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3445
3445
def checkunfinished(repo, commit=False):
    """Abort if a multistep operation, like graft, has left an unfinished
    state file behind.

    With commit=True, entries of unfinishedstates whose allowcommit flag
    is set are skipped.  It's probably good to check this right before
    bailifchanged().
    """
    for statefile, _clearable, allowcommit, msg, hint in unfinishedstates:
        skippable = commit and allowcommit
        if not skippable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3456
3456
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.
    """
    # first pass: refuse to proceed while any non-clearable operation is
    # in progress
    for statefile, clearable, _allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: every remaining present state file is safe to delete
    for statefile, clearable, _allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3467
3467
3468 afterresolvedstates = [
3468 afterresolvedstates = [
3469 ('graftstate',
3469 ('graftstate',
3470 _('hg graft --continue')),
3470 _('hg graft --continue')),
3471 ]
3471 ]
3472
3472
3473 def howtocontinue(repo):
3473 def howtocontinue(repo):
3474 '''Check for an unfinished operation and return the command to finish
3474 '''Check for an unfinished operation and return the command to finish
3475 it.
3475 it.
3476
3476
3477 afterresolvedstates tuples define a .hg/{file} and the corresponding
3477 afterresolvedstates tuples define a .hg/{file} and the corresponding
3478 command needed to finish it.
3478 command needed to finish it.
3479
3479
3480 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3480 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3481 a boolean.
3481 a boolean.
3482 '''
3482 '''
3483 contmsg = _("continue: %s")
3483 contmsg = _("continue: %s")
3484 for f, msg in afterresolvedstates:
3484 for f, msg in afterresolvedstates:
3485 if repo.vfs.exists(f):
3485 if repo.vfs.exists(f):
3486 return contmsg % msg, True
3486 return contmsg % msg, True
3487 workingctx = repo[None]
3487 workingctx = repo[None]
3488 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3488 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3489 for s in workingctx.substate)
3489 for s in workingctx.substate)
3490 if dirty:
3490 if dirty:
3491 return contmsg % _("hg commit"), False
3491 return contmsg % _("hg commit"), False
3492 return None, None
3492 return None, None
3493
3493
3494 def checkafterresolved(repo):
3494 def checkafterresolved(repo):
3495 '''Inform the user about the next action after completing hg resolve
3495 '''Inform the user about the next action after completing hg resolve
3496
3496
3497 If there's a matching afterresolvedstates, howtocontinue will yield
3497 If there's a matching afterresolvedstates, howtocontinue will yield
3498 repo.ui.warn as the reporter.
3498 repo.ui.warn as the reporter.
3499
3499
3500 Otherwise, it will yield repo.ui.note.
3500 Otherwise, it will yield repo.ui.note.
3501 '''
3501 '''
3502 msg, warning = howtocontinue(repo)
3502 msg, warning = howtocontinue(repo)
3503 if msg is not None:
3503 if msg is not None:
3504 if warning:
3504 if warning:
3505 repo.ui.warn("%s\n" % msg)
3505 repo.ui.warn("%s\n" % msg)
3506 else:
3506 else:
3507 repo.ui.note("%s\n" % msg)
3507 repo.ui.note("%s\n" % msg)
3508
3508
3509 def wrongtooltocontinue(repo, task):
3509 def wrongtooltocontinue(repo, task):
3510 '''Raise an abort suggesting how to properly continue if there is an
3510 '''Raise an abort suggesting how to properly continue if there is an
3511 active task.
3511 active task.
3512
3512
3513 Uses howtocontinue() to find the active task.
3513 Uses howtocontinue() to find the active task.
3514
3514
3515 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3515 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3516 a hint.
3516 a hint.
3517 '''
3517 '''
3518 after = howtocontinue(repo)
3518 after = howtocontinue(repo)
3519 hint = None
3519 hint = None
3520 if after[1]:
3520 if after[1]:
3521 hint = after[0]
3521 hint = after[0]
3522 raise error.Abort(_('no %s in progress') % task, hint=hint)
3522 raise error.Abort(_('no %s in progress') % task, hint=hint)
3523
3523
3524 class dirstateguard(object):
3524 class dirstateguard(object):
3525 '''Restore dirstate at unexpected failure.
3525 '''Restore dirstate at unexpected failure.
3526
3526
3527 At the construction, this class does:
3527 At the construction, this class does:
3528
3528
3529 - write current ``repo.dirstate`` out, and
3529 - write current ``repo.dirstate`` out, and
3530 - save ``.hg/dirstate`` into the backup file
3530 - save ``.hg/dirstate`` into the backup file
3531
3531
3532 This restores ``.hg/dirstate`` from backup file, if ``release()``
3532 This restores ``.hg/dirstate`` from backup file, if ``release()``
3533 is invoked before ``close()``.
3533 is invoked before ``close()``.
3534
3534
3535 This just removes the backup file at ``close()`` before ``release()``.
3535 This just removes the backup file at ``close()`` before ``release()``.
3536 '''
3536 '''
3537
3537
3538 def __init__(self, repo, name):
3538 def __init__(self, repo, name):
3539 self._repo = repo
3539 self._repo = repo
3540 self._active = False
3540 self._active = False
3541 self._closed = False
3541 self._closed = False
3542 self._suffix = '.backup.%s.%d' % (name, id(self))
3542 self._suffix = '.backup.%s.%d' % (name, id(self))
3543 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3543 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3544 self._active = True
3544 self._active = True
3545
3545
3546 def __del__(self):
3546 def __del__(self):
3547 if self._active: # still active
3547 if self._active: # still active
3548 # this may occur, even if this class is used correctly:
3548 # this may occur, even if this class is used correctly:
3549 # for example, releasing other resources like transaction
3549 # for example, releasing other resources like transaction
3550 # may raise exception before ``dirstateguard.release`` in
3550 # may raise exception before ``dirstateguard.release`` in
3551 # ``release(tr, ....)``.
3551 # ``release(tr, ....)``.
3552 self._abort()
3552 self._abort()
3553
3553
3554 def close(self):
3554 def close(self):
3555 if not self._active: # already inactivated
3555 if not self._active: # already inactivated
3556 msg = (_("can't close already inactivated backup: dirstate%s")
3556 msg = (_("can't close already inactivated backup: dirstate%s")
3557 % self._suffix)
3557 % self._suffix)
3558 raise error.Abort(msg)
3558 raise error.Abort(msg)
3559
3559
3560 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3560 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3561 self._suffix)
3561 self._suffix)
3562 self._active = False
3562 self._active = False
3563 self._closed = True
3563 self._closed = True
3564
3564
3565 def _abort(self):
3565 def _abort(self):
3566 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3566 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3567 self._suffix)
3567 self._suffix)
3568 self._active = False
3568 self._active = False
3569
3569
3570 def release(self):
3570 def release(self):
3571 if not self._closed:
3571 if not self._closed:
3572 if not self._active: # already inactivated
3572 if not self._active: # already inactivated
3573 msg = (_("can't release already inactivated backup:"
3573 msg = (_("can't release already inactivated backup:"
3574 " dirstate%s")
3574 " dirstate%s")
3575 % self._suffix)
3575 % self._suffix)
3576 raise error.Abort(msg)
3576 raise error.Abort(msg)
3577 self._abort()
3577 self._abort()
@@ -1,1599 +1,1583
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import array
10 import array
11 import heapq
11 import heapq
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 mdiff,
18 mdiff,
19 parsers,
19 parsers,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
24 propertycache = util.propertycache
24 propertycache = util.propertycache
25
25
26 def _parsev1(data):
26 def _parsev1(data):
27 # This method does a little bit of excessive-looking
27 # This method does a little bit of excessive-looking
28 # precondition checking. This is so that the behavior of this
28 # precondition checking. This is so that the behavior of this
29 # class exactly matches its C counterpart to try and help
29 # class exactly matches its C counterpart to try and help
30 # prevent surprise breakage for anyone that develops against
30 # prevent surprise breakage for anyone that develops against
31 # the pure version.
31 # the pure version.
32 if data and data[-1] != '\n':
32 if data and data[-1] != '\n':
33 raise ValueError('Manifest did not end in a newline.')
33 raise ValueError('Manifest did not end in a newline.')
34 prev = None
34 prev = None
35 for l in data.splitlines():
35 for l in data.splitlines():
36 if prev is not None and prev > l:
36 if prev is not None and prev > l:
37 raise ValueError('Manifest lines not in sorted order.')
37 raise ValueError('Manifest lines not in sorted order.')
38 prev = l
38 prev = l
39 f, n = l.split('\0')
39 f, n = l.split('\0')
40 if len(n) > 40:
40 if len(n) > 40:
41 yield f, revlog.bin(n[:40]), n[40:]
41 yield f, revlog.bin(n[:40]), n[40:]
42 else:
42 else:
43 yield f, revlog.bin(n), ''
43 yield f, revlog.bin(n), ''
44
44
45 def _parsev2(data):
45 def _parsev2(data):
46 metadataend = data.find('\n')
46 metadataend = data.find('\n')
47 # Just ignore metadata for now
47 # Just ignore metadata for now
48 pos = metadataend + 1
48 pos = metadataend + 1
49 prevf = ''
49 prevf = ''
50 while pos < len(data):
50 while pos < len(data):
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 if end == -1:
52 if end == -1:
53 raise ValueError('Manifest ended with incomplete file entry.')
53 raise ValueError('Manifest ended with incomplete file entry.')
54 stemlen = ord(data[pos])
54 stemlen = ord(data[pos])
55 items = data[pos + 1:end].split('\0')
55 items = data[pos + 1:end].split('\0')
56 f = prevf[:stemlen] + items[0]
56 f = prevf[:stemlen] + items[0]
57 if prevf > f:
57 if prevf > f:
58 raise ValueError('Manifest entries not in sorted order.')
58 raise ValueError('Manifest entries not in sorted order.')
59 fl = items[1]
59 fl = items[1]
60 # Just ignore metadata (items[2:] for now)
60 # Just ignore metadata (items[2:] for now)
61 n = data[end + 1:end + 21]
61 n = data[end + 1:end + 21]
62 yield f, n, fl
62 yield f, n, fl
63 pos = end + 22
63 pos = end + 22
64 prevf = f
64 prevf = f
65
65
66 def _parse(data):
66 def _parse(data):
67 """Generates (path, node, flags) tuples from a manifest text"""
67 """Generates (path, node, flags) tuples from a manifest text"""
68 if data.startswith('\0'):
68 if data.startswith('\0'):
69 return iter(_parsev2(data))
69 return iter(_parsev2(data))
70 else:
70 else:
71 return iter(_parsev1(data))
71 return iter(_parsev1(data))
72
72
73 def _text(it, usemanifestv2):
73 def _text(it, usemanifestv2):
74 """Given an iterator over (path, node, flags) tuples, returns a manifest
74 """Given an iterator over (path, node, flags) tuples, returns a manifest
75 text"""
75 text"""
76 if usemanifestv2:
76 if usemanifestv2:
77 return _textv2(it)
77 return _textv2(it)
78 else:
78 else:
79 return _textv1(it)
79 return _textv1(it)
80
80
81 def _textv1(it):
81 def _textv1(it):
82 files = []
82 files = []
83 lines = []
83 lines = []
84 _hex = revlog.hex
84 _hex = revlog.hex
85 for f, n, fl in it:
85 for f, n, fl in it:
86 files.append(f)
86 files.append(f)
87 # if this is changed to support newlines in filenames,
87 # if this is changed to support newlines in filenames,
88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
90
90
91 _checkforbidden(files)
91 _checkforbidden(files)
92 return ''.join(lines)
92 return ''.join(lines)
93
93
94 def _textv2(it):
94 def _textv2(it):
95 files = []
95 files = []
96 lines = ['\0\n']
96 lines = ['\0\n']
97 prevf = ''
97 prevf = ''
98 for f, n, fl in it:
98 for f, n, fl in it:
99 files.append(f)
99 files.append(f)
100 stem = os.path.commonprefix([prevf, f])
100 stem = os.path.commonprefix([prevf, f])
101 stemlen = min(len(stem), 255)
101 stemlen = min(len(stem), 255)
102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
103 prevf = f
103 prevf = f
104 _checkforbidden(files)
104 _checkforbidden(files)
105 return ''.join(lines)
105 return ''.join(lines)
106
106
107 class lazymanifestiter(object):
107 class lazymanifestiter(object):
108 def __init__(self, lm):
108 def __init__(self, lm):
109 self.pos = 0
109 self.pos = 0
110 self.lm = lm
110 self.lm = lm
111
111
112 def __iter__(self):
112 def __iter__(self):
113 return self
113 return self
114
114
115 def next(self):
115 def next(self):
116 try:
116 try:
117 data, pos = self.lm._get(self.pos)
117 data, pos = self.lm._get(self.pos)
118 except IndexError:
118 except IndexError:
119 raise StopIteration
119 raise StopIteration
120 if pos == -1:
120 if pos == -1:
121 self.pos += 1
121 self.pos += 1
122 return data[0]
122 return data[0]
123 self.pos += 1
123 self.pos += 1
124 zeropos = data.find('\x00', pos)
124 zeropos = data.find('\x00', pos)
125 return data[pos:zeropos]
125 return data[pos:zeropos]
126
126
127 class lazymanifestiterentries(object):
127 class lazymanifestiterentries(object):
128 def __init__(self, lm):
128 def __init__(self, lm):
129 self.lm = lm
129 self.lm = lm
130 self.pos = 0
130 self.pos = 0
131
131
132 def __iter__(self):
132 def __iter__(self):
133 return self
133 return self
134
134
135 def next(self):
135 def next(self):
136 try:
136 try:
137 data, pos = self.lm._get(self.pos)
137 data, pos = self.lm._get(self.pos)
138 except IndexError:
138 except IndexError:
139 raise StopIteration
139 raise StopIteration
140 if pos == -1:
140 if pos == -1:
141 self.pos += 1
141 self.pos += 1
142 return data
142 return data
143 zeropos = data.find('\x00', pos)
143 zeropos = data.find('\x00', pos)
144 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
144 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
145 zeropos + 1, 40)
145 zeropos + 1, 40)
146 flags = self.lm._getflags(data, self.pos, zeropos)
146 flags = self.lm._getflags(data, self.pos, zeropos)
147 self.pos += 1
147 self.pos += 1
148 return (data[pos:zeropos], hashval, flags)
148 return (data[pos:zeropos], hashval, flags)
149
149
150 def unhexlify(data, extra, pos, length):
150 def unhexlify(data, extra, pos, length):
151 s = data[pos:pos + length].decode('hex')
151 s = data[pos:pos + length].decode('hex')
152 if extra:
152 if extra:
153 s += chr(extra & 0xff)
153 s += chr(extra & 0xff)
154 return s
154 return s
155
155
156 def _cmp(a, b):
156 def _cmp(a, b):
157 return (a > b) - (a < b)
157 return (a > b) - (a < b)
158
158
159 class _lazymanifest(object):
159 class _lazymanifest(object):
160 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
160 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
161 if positions is None:
161 if positions is None:
162 self.positions = self.findlines(data)
162 self.positions = self.findlines(data)
163 self.extrainfo = [0] * len(self.positions)
163 self.extrainfo = [0] * len(self.positions)
164 self.data = data
164 self.data = data
165 self.extradata = []
165 self.extradata = []
166 else:
166 else:
167 self.positions = positions[:]
167 self.positions = positions[:]
168 self.extrainfo = extrainfo[:]
168 self.extrainfo = extrainfo[:]
169 self.extradata = extradata[:]
169 self.extradata = extradata[:]
170 self.data = data
170 self.data = data
171
171
172 def findlines(self, data):
172 def findlines(self, data):
173 if not data:
173 if not data:
174 return []
174 return []
175 pos = data.find("\n")
175 pos = data.find("\n")
176 if pos == -1 or data[-1] != '\n':
176 if pos == -1 or data[-1] != '\n':
177 raise ValueError("Manifest did not end in a newline.")
177 raise ValueError("Manifest did not end in a newline.")
178 positions = [0]
178 positions = [0]
179 prev = data[:data.find('\x00')]
179 prev = data[:data.find('\x00')]
180 while pos < len(data) - 1 and pos != -1:
180 while pos < len(data) - 1 and pos != -1:
181 positions.append(pos + 1)
181 positions.append(pos + 1)
182 nexts = data[pos + 1:data.find('\x00', pos + 1)]
182 nexts = data[pos + 1:data.find('\x00', pos + 1)]
183 if nexts < prev:
183 if nexts < prev:
184 raise ValueError("Manifest lines not in sorted order.")
184 raise ValueError("Manifest lines not in sorted order.")
185 prev = nexts
185 prev = nexts
186 pos = data.find("\n", pos + 1)
186 pos = data.find("\n", pos + 1)
187 return positions
187 return positions
188
188
189 def _get(self, index):
189 def _get(self, index):
190 # get the position encoded in pos:
190 # get the position encoded in pos:
191 # positive number is an index in 'data'
191 # positive number is an index in 'data'
192 # negative number is in extrapieces
192 # negative number is in extrapieces
193 pos = self.positions[index]
193 pos = self.positions[index]
194 if pos >= 0:
194 if pos >= 0:
195 return self.data, pos
195 return self.data, pos
196 return self.extradata[-pos - 1], -1
196 return self.extradata[-pos - 1], -1
197
197
198 def _getkey(self, pos):
198 def _getkey(self, pos):
199 if pos >= 0:
199 if pos >= 0:
200 return self.data[pos:self.data.find('\x00', pos + 1)]
200 return self.data[pos:self.data.find('\x00', pos + 1)]
201 return self.extradata[-pos - 1][0]
201 return self.extradata[-pos - 1][0]
202
202
203 def bsearch(self, key):
203 def bsearch(self, key):
204 first = 0
204 first = 0
205 last = len(self.positions) - 1
205 last = len(self.positions) - 1
206
206
207 while first <= last:
207 while first <= last:
208 midpoint = (first + last)//2
208 midpoint = (first + last)//2
209 nextpos = self.positions[midpoint]
209 nextpos = self.positions[midpoint]
210 candidate = self._getkey(nextpos)
210 candidate = self._getkey(nextpos)
211 r = _cmp(key, candidate)
211 r = _cmp(key, candidate)
212 if r == 0:
212 if r == 0:
213 return midpoint
213 return midpoint
214 else:
214 else:
215 if r < 0:
215 if r < 0:
216 last = midpoint - 1
216 last = midpoint - 1
217 else:
217 else:
218 first = midpoint + 1
218 first = midpoint + 1
219 return -1
219 return -1
220
220
221 def bsearch2(self, key):
221 def bsearch2(self, key):
222 # same as the above, but will always return the position
222 # same as the above, but will always return the position
223 # done for performance reasons
223 # done for performance reasons
224 first = 0
224 first = 0
225 last = len(self.positions) - 1
225 last = len(self.positions) - 1
226
226
227 while first <= last:
227 while first <= last:
228 midpoint = (first + last)//2
228 midpoint = (first + last)//2
229 nextpos = self.positions[midpoint]
229 nextpos = self.positions[midpoint]
230 candidate = self._getkey(nextpos)
230 candidate = self._getkey(nextpos)
231 r = _cmp(key, candidate)
231 r = _cmp(key, candidate)
232 if r == 0:
232 if r == 0:
233 return (midpoint, True)
233 return (midpoint, True)
234 else:
234 else:
235 if r < 0:
235 if r < 0:
236 last = midpoint - 1
236 last = midpoint - 1
237 else:
237 else:
238 first = midpoint + 1
238 first = midpoint + 1
239 return (first, False)
239 return (first, False)
240
240
241 def __contains__(self, key):
241 def __contains__(self, key):
242 return self.bsearch(key) != -1
242 return self.bsearch(key) != -1
243
243
244 def _getflags(self, data, needle, pos):
244 def _getflags(self, data, needle, pos):
245 start = pos + 41
245 start = pos + 41
246 end = data.find("\n", start)
246 end = data.find("\n", start)
247 if end == -1:
247 if end == -1:
248 end = len(data) - 1
248 end = len(data) - 1
249 if start == end:
249 if start == end:
250 return ''
250 return ''
251 return self.data[start:end]
251 return self.data[start:end]
252
252
253 def __getitem__(self, key):
253 def __getitem__(self, key):
254 if not isinstance(key, str):
254 if not isinstance(key, str):
255 raise TypeError("getitem: manifest keys must be a string.")
255 raise TypeError("getitem: manifest keys must be a string.")
256 needle = self.bsearch(key)
256 needle = self.bsearch(key)
257 if needle == -1:
257 if needle == -1:
258 raise KeyError
258 raise KeyError
259 data, pos = self._get(needle)
259 data, pos = self._get(needle)
260 if pos == -1:
260 if pos == -1:
261 return (data[1], data[2])
261 return (data[1], data[2])
262 zeropos = data.find('\x00', pos)
262 zeropos = data.find('\x00', pos)
263 assert 0 <= needle <= len(self.positions)
263 assert 0 <= needle <= len(self.positions)
264 assert len(self.extrainfo) == len(self.positions)
264 assert len(self.extrainfo) == len(self.positions)
265 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
265 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
266 flags = self._getflags(data, needle, zeropos)
266 flags = self._getflags(data, needle, zeropos)
267 return (hashval, flags)
267 return (hashval, flags)
268
268
269 def __delitem__(self, key):
269 def __delitem__(self, key):
270 needle, found = self.bsearch2(key)
270 needle, found = self.bsearch2(key)
271 if not found:
271 if not found:
272 raise KeyError
272 raise KeyError
273 cur = self.positions[needle]
273 cur = self.positions[needle]
274 self.positions = self.positions[:needle] + self.positions[needle + 1:]
274 self.positions = self.positions[:needle] + self.positions[needle + 1:]
275 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
275 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
276 if cur >= 0:
276 if cur >= 0:
277 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
277 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
278
278
279 def __setitem__(self, key, value):
279 def __setitem__(self, key, value):
280 if not isinstance(key, str):
280 if not isinstance(key, str):
281 raise TypeError("setitem: manifest keys must be a string.")
281 raise TypeError("setitem: manifest keys must be a string.")
282 if not isinstance(value, tuple) or len(value) != 2:
282 if not isinstance(value, tuple) or len(value) != 2:
283 raise TypeError("Manifest values must be a tuple of (node, flags).")
283 raise TypeError("Manifest values must be a tuple of (node, flags).")
284 hashval = value[0]
284 hashval = value[0]
285 if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
285 if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
286 raise TypeError("node must be a 20-byte string")
286 raise TypeError("node must be a 20-byte string")
287 flags = value[1]
287 flags = value[1]
288 if len(hashval) == 22:
288 if len(hashval) == 22:
289 hashval = hashval[:-1]
289 hashval = hashval[:-1]
290 if not isinstance(flags, str) or len(flags) > 1:
290 if not isinstance(flags, str) or len(flags) > 1:
291 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
291 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
292 needle, found = self.bsearch2(key)
292 needle, found = self.bsearch2(key)
293 if found:
293 if found:
294 # put the item
294 # put the item
295 pos = self.positions[needle]
295 pos = self.positions[needle]
296 if pos < 0:
296 if pos < 0:
297 self.extradata[-pos - 1] = (key, hashval, value[1])
297 self.extradata[-pos - 1] = (key, hashval, value[1])
298 else:
298 else:
299 # just don't bother
299 # just don't bother
300 self.extradata.append((key, hashval, value[1]))
300 self.extradata.append((key, hashval, value[1]))
301 self.positions[needle] = -len(self.extradata)
301 self.positions[needle] = -len(self.extradata)
302 else:
302 else:
303 # not found, put it in with extra positions
303 # not found, put it in with extra positions
304 self.extradata.append((key, hashval, value[1]))
304 self.extradata.append((key, hashval, value[1]))
305 self.positions = (self.positions[:needle] + [-len(self.extradata)]
305 self.positions = (self.positions[:needle] + [-len(self.extradata)]
306 + self.positions[needle:])
306 + self.positions[needle:])
307 self.extrainfo = (self.extrainfo[:needle] + [0] +
307 self.extrainfo = (self.extrainfo[:needle] + [0] +
308 self.extrainfo[needle:])
308 self.extrainfo[needle:])
309
309
310 def copy(self):
310 def copy(self):
311 # XXX call _compact like in C?
311 # XXX call _compact like in C?
312 return _lazymanifest(self.data, self.positions, self.extrainfo,
312 return _lazymanifest(self.data, self.positions, self.extrainfo,
313 self.extradata)
313 self.extradata)
314
314
315 def _compact(self):
315 def _compact(self):
316 # hopefully not called TOO often
316 # hopefully not called TOO often
317 if len(self.extradata) == 0:
317 if len(self.extradata) == 0:
318 return
318 return
319 l = []
319 l = []
320 last_cut = 0
320 last_cut = 0
321 i = 0
321 i = 0
322 offset = 0
322 offset = 0
323 self.extrainfo = [0] * len(self.positions)
323 self.extrainfo = [0] * len(self.positions)
324 while i < len(self.positions):
324 while i < len(self.positions):
325 if self.positions[i] >= 0:
325 if self.positions[i] >= 0:
326 cur = self.positions[i]
326 cur = self.positions[i]
327 last_cut = cur
327 last_cut = cur
328 while True:
328 while True:
329 self.positions[i] = offset
329 self.positions[i] = offset
330 i += 1
330 i += 1
331 if i == len(self.positions) or self.positions[i] < 0:
331 if i == len(self.positions) or self.positions[i] < 0:
332 break
332 break
333 offset += self.positions[i] - cur
333 offset += self.positions[i] - cur
334 cur = self.positions[i]
334 cur = self.positions[i]
335 end_cut = self.data.find('\n', cur)
335 end_cut = self.data.find('\n', cur)
336 if end_cut != -1:
336 if end_cut != -1:
337 end_cut += 1
337 end_cut += 1
338 offset += end_cut - cur
338 offset += end_cut - cur
339 l.append(self.data[last_cut:end_cut])
339 l.append(self.data[last_cut:end_cut])
340 else:
340 else:
341 while i < len(self.positions) and self.positions[i] < 0:
341 while i < len(self.positions) and self.positions[i] < 0:
342 cur = self.positions[i]
342 cur = self.positions[i]
343 t = self.extradata[-cur - 1]
343 t = self.extradata[-cur - 1]
344 l.append(self._pack(t))
344 l.append(self._pack(t))
345 self.positions[i] = offset
345 self.positions[i] = offset
346 if len(t[1]) > 20:
346 if len(t[1]) > 20:
347 self.extrainfo[i] = ord(t[1][21])
347 self.extrainfo[i] = ord(t[1][21])
348 offset += len(l[-1])
348 offset += len(l[-1])
349 i += 1
349 i += 1
350 self.data = ''.join(l)
350 self.data = ''.join(l)
351 self.extradata = []
351 self.extradata = []
352
352
353 def _pack(self, d):
353 def _pack(self, d):
354 return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'
354 return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'
355
355
356 def text(self):
356 def text(self):
357 self._compact()
357 self._compact()
358 return self.data
358 return self.data
359
359
360 def diff(self, m2, clean=False):
360 def diff(self, m2, clean=False):
361 '''Finds changes between the current manifest and m2.'''
361 '''Finds changes between the current manifest and m2.'''
362 # XXX think whether efficiency matters here
362 # XXX think whether efficiency matters here
363 diff = {}
363 diff = {}
364
364
365 for fn, e1, flags in self.iterentries():
365 for fn, e1, flags in self.iterentries():
366 if fn not in m2:
366 if fn not in m2:
367 diff[fn] = (e1, flags), (None, '')
367 diff[fn] = (e1, flags), (None, '')
368 else:
368 else:
369 e2 = m2[fn]
369 e2 = m2[fn]
370 if (e1, flags) != e2:
370 if (e1, flags) != e2:
371 diff[fn] = (e1, flags), e2
371 diff[fn] = (e1, flags), e2
372 elif clean:
372 elif clean:
373 diff[fn] = None
373 diff[fn] = None
374
374
375 for fn, e2, flags in m2.iterentries():
375 for fn, e2, flags in m2.iterentries():
376 if fn not in self:
376 if fn not in self:
377 diff[fn] = (None, ''), (e2, flags)
377 diff[fn] = (None, ''), (e2, flags)
378
378
379 return diff
379 return diff
380
380
381 def iterentries(self):
381 def iterentries(self):
382 return lazymanifestiterentries(self)
382 return lazymanifestiterentries(self)
383
383
384 def iterkeys(self):
384 def iterkeys(self):
385 return lazymanifestiter(self)
385 return lazymanifestiter(self)
386
386
387 def __iter__(self):
387 def __iter__(self):
388 return lazymanifestiter(self)
388 return lazymanifestiter(self)
389
389
390 def __len__(self):
390 def __len__(self):
391 return len(self.positions)
391 return len(self.positions)
392
392
393 def filtercopy(self, filterfn):
393 def filtercopy(self, filterfn):
394 # XXX should be optimized
394 # XXX should be optimized
395 c = _lazymanifest('')
395 c = _lazymanifest('')
396 for f, n, fl in self.iterentries():
396 for f, n, fl in self.iterentries():
397 if filterfn(f):
397 if filterfn(f):
398 c[f] = n, fl
398 c[f] = n, fl
399 return c
399 return c
400
400
401 try:
401 try:
402 _lazymanifest = parsers.lazymanifest
402 _lazymanifest = parsers.lazymanifest
403 except AttributeError:
403 except AttributeError:
404 pass
404 pass
405
405
406 class manifestdict(object):
406 class manifestdict(object):
407 def __init__(self, data=''):
407 def __init__(self, data=''):
408 if data.startswith('\0'):
408 if data.startswith('\0'):
409 #_lazymanifest can not parse v2
409 #_lazymanifest can not parse v2
410 self._lm = _lazymanifest('')
410 self._lm = _lazymanifest('')
411 for f, n, fl in _parsev2(data):
411 for f, n, fl in _parsev2(data):
412 self._lm[f] = n, fl
412 self._lm[f] = n, fl
413 else:
413 else:
414 self._lm = _lazymanifest(data)
414 self._lm = _lazymanifest(data)
415
415
416 def __getitem__(self, key):
416 def __getitem__(self, key):
417 return self._lm[key][0]
417 return self._lm[key][0]
418
418
419 def find(self, key):
419 def find(self, key):
420 return self._lm[key]
420 return self._lm[key]
421
421
422 def __len__(self):
422 def __len__(self):
423 return len(self._lm)
423 return len(self._lm)
424
424
425 def __nonzero__(self):
425 def __nonzero__(self):
426 # nonzero is covered by the __len__ function, but implementing it here
426 # nonzero is covered by the __len__ function, but implementing it here
427 # makes it easier for extensions to override.
427 # makes it easier for extensions to override.
428 return len(self._lm) != 0
428 return len(self._lm) != 0
429
429
430 def __setitem__(self, key, node):
430 def __setitem__(self, key, node):
431 self._lm[key] = node, self.flags(key, '')
431 self._lm[key] = node, self.flags(key, '')
432
432
433 def __contains__(self, key):
433 def __contains__(self, key):
434 return key in self._lm
434 return key in self._lm
435
435
436 def __delitem__(self, key):
436 def __delitem__(self, key):
437 del self._lm[key]
437 del self._lm[key]
438
438
439 def __iter__(self):
439 def __iter__(self):
440 return self._lm.__iter__()
440 return self._lm.__iter__()
441
441
442 def iterkeys(self):
442 def iterkeys(self):
443 return self._lm.iterkeys()
443 return self._lm.iterkeys()
444
444
445 def keys(self):
445 def keys(self):
446 return list(self.iterkeys())
446 return list(self.iterkeys())
447
447
448 def filesnotin(self, m2):
448 def filesnotin(self, m2):
449 '''Set of files in this manifest that are not in the other'''
449 '''Set of files in this manifest that are not in the other'''
450 diff = self.diff(m2)
450 diff = self.diff(m2)
451 files = set(filepath
451 files = set(filepath
452 for filepath, hashflags in diff.iteritems()
452 for filepath, hashflags in diff.iteritems()
453 if hashflags[1][0] is None)
453 if hashflags[1][0] is None)
454 return files
454 return files
455
455
456 @propertycache
456 @propertycache
457 def _dirs(self):
457 def _dirs(self):
458 return util.dirs(self)
458 return util.dirs(self)
459
459
460 def dirs(self):
460 def dirs(self):
461 return self._dirs
461 return self._dirs
462
462
463 def hasdir(self, dir):
463 def hasdir(self, dir):
464 return dir in self._dirs
464 return dir in self._dirs
465
465
466 def _filesfastpath(self, match):
466 def _filesfastpath(self, match):
467 '''Checks whether we can correctly and quickly iterate over matcher
467 '''Checks whether we can correctly and quickly iterate over matcher
468 files instead of over manifest files.'''
468 files instead of over manifest files.'''
469 files = match.files()
469 files = match.files()
470 return (len(files) < 100 and (match.isexact() or
470 return (len(files) < 100 and (match.isexact() or
471 (match.prefix() and all(fn in self for fn in files))))
471 (match.prefix() and all(fn in self for fn in files))))
472
472
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            # no filtering requested: stream every file name
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything still in fset was asked for but never seen; report it
        # unless it names a directory that does exist in the manifest
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
508
508
509 def matches(self, match):
509 def matches(self, match):
510 '''generate a new manifest filtered by the match argument'''
510 '''generate a new manifest filtered by the match argument'''
511 if match.always():
511 if match.always():
512 return self.copy()
512 return self.copy()
513
513
514 if self._filesfastpath(match):
514 if self._filesfastpath(match):
515 m = manifestdict()
515 m = manifestdict()
516 lm = self._lm
516 lm = self._lm
517 for fn in match.files():
517 for fn in match.files():
518 if fn in lm:
518 if fn in lm:
519 m._lm[fn] = lm[fn]
519 m._lm[fn] = lm[fn]
520 return m
520 return m
521
521
522 m = manifestdict()
522 m = manifestdict()
523 m._lm = self._lm.filtercopy(match)
523 m._lm = self._lm.filtercopy(match)
524 return m
524 return m
525
525
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        # delegate to the underlying lazymanifest implementation
        return self._lm.diff(m2._lm, clean)
542
542
543 def setflag(self, key, flag):
543 def setflag(self, key, flag):
544 self._lm[key] = self[key], flag
544 self._lm[key] = self[key], flag
545
545
546 def get(self, key, default=None):
546 def get(self, key, default=None):
547 try:
547 try:
548 return self._lm[key][0]
548 return self._lm[key][0]
549 except KeyError:
549 except KeyError:
550 return default
550 return default
551
551
552 def flags(self, key, default=''):
552 def flags(self, key, default=''):
553 try:
553 try:
554 return self._lm[key][1]
554 return self._lm[key][1]
555 except KeyError:
555 except KeyError:
556 return default
556 return default
557
557
558 def copy(self):
558 def copy(self):
559 c = manifestdict()
559 c = manifestdict()
560 c._lm = self._lm.copy()
560 c._lm = self._lm.copy()
561 return c
561 return c
562
562
563 def iteritems(self):
563 def iteritems(self):
564 return (x[:2] for x in self._lm.iterentries())
564 return (x[:2] for x in self._lm.iterentries())
565
565
566 def iterentries(self):
566 def iterentries(self):
567 return self._lm.iterentries()
567 return self._lm.iterentries()
568
568
569 def text(self, usemanifestv2=False):
569 def text(self, usemanifestv2=False):
570 if usemanifestv2:
570 if usemanifestv2:
571 return _textv2(self._lm.iterentries())
571 return _textv2(self._lm.iterentries())
572 else:
572 else:
573 # use (probably) native version for v1
573 # use (probably) native version for v1
574 return self._lm.text()
574 return self._lm.text()
575
575
    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        *changes* is a sequence of (filename, todelete) pairs; when todelete
        is false, the file's current entry in this manifest is (re)written.
        Returns (arraytext, deltatext): the new full text and the delta.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                                _("failed to remove %s from manifest") % f)
                    l = ""
                # merge this change into the previous delta hunk when the
                # regions touch or overlap
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
627
627
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer or a string
    s is a string'''
    def advance(i, c):
        # move i forward to the next occurrence of character c (or end)
        while i < lenm and m[i] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # binary search over lines; each line is "<name>\0<40-hex-sha1><flags>\n"
    while lo < hi:
        mid = (lo + hi) // 2
        # back up to the start of the line containing offset mid
        start = mid
        while start > 0 and m[start - 1] != '\n':
            start -= 1
        end = advance(start, '\0')
        if m[start:end] < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
667
667
668 def _checkforbidden(l):
668 def _checkforbidden(l):
669 """Check filenames for illegal characters."""
669 """Check filenames for illegal characters."""
670 for f in l:
670 for f in l:
671 if '\n' in f or '\r' in f:
671 if '\n' in f or '\r' in f:
672 raise error.RevlogError(
672 raise error.RevlogError(
673 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
673 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
674
674
675
675
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # x is a list of [start, end, content] hunks, sorted by offset.
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        # copy the untouched span, then splice in the replacement content
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    # binary delta format: a (start, end, length) header before each hunk
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
696
696
697 def _splittopdir(f):
697 def _splittopdir(f):
698 if '/' in f:
698 if '/' in f:
699 dir, subpath = f.split('/', 1)
699 dir, subpath = f.split('/', 1)
700 return dir + '/', subpath
700 return dir + '/', subpath
701 else:
701 else:
702 return '', f
702 return '', f
703
703
704 _noop = lambda s: None
704 _noop = lambda s: None
705
705
706 class treemanifest(object):
706 class treemanifest(object):
707 def __init__(self, dir='', text=''):
707 def __init__(self, dir='', text=''):
708 self._dir = dir
708 self._dir = dir
709 self._node = revlog.nullid
709 self._node = revlog.nullid
710 self._loadfunc = _noop
710 self._loadfunc = _noop
711 self._copyfunc = _noop
711 self._copyfunc = _noop
712 self._dirty = False
712 self._dirty = False
713 self._dirs = {}
713 self._dirs = {}
714 # Using _lazymanifest here is a little slower than plain old dicts
714 # Using _lazymanifest here is a little slower than plain old dicts
715 self._files = {}
715 self._files = {}
716 self._flags = {}
716 self._flags = {}
717 if text:
717 if text:
718 def readsubtree(subdir, subm):
718 def readsubtree(subdir, subm):
719 raise AssertionError('treemanifest constructor only accepts '
719 raise AssertionError('treemanifest constructor only accepts '
720 'flat manifests')
720 'flat manifests')
721 self.parse(text, readsubtree)
721 self.parse(text, readsubtree)
722 self._dirty = True # Mark flat manifest dirty after parsing
722 self._dirty = True # Mark flat manifest dirty after parsing
723
723
724 def _subpath(self, path):
724 def _subpath(self, path):
725 return self._dir + path
725 return self._dir + path
726
726
727 def __len__(self):
727 def __len__(self):
728 self._load()
728 self._load()
729 size = len(self._files)
729 size = len(self._files)
730 for m in self._dirs.values():
730 for m in self._dirs.values():
731 size += m.__len__()
731 size += m.__len__()
732 return size
732 return size
733
733
734 def _isempty(self):
734 def _isempty(self):
735 self._load() # for consistency; already loaded by all callers
735 self._load() # for consistency; already loaded by all callers
736 return (not self._files and (not self._dirs or
736 return (not self._files and (not self._dirs or
737 all(m._isempty() for m in self._dirs.values())))
737 all(m._isempty() for m in self._dirs.values())))
738
738
739 def __repr__(self):
739 def __repr__(self):
740 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
740 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
741 (self._dir, revlog.hex(self._node),
741 (self._dir, revlog.hex(self._node),
742 bool(self._loadfunc is _noop),
742 bool(self._loadfunc is _noop),
743 self._dirty, id(self)))
743 self._dirty, id(self)))
744
744
745 def dir(self):
745 def dir(self):
746 '''The directory that this tree manifest represents, including a
746 '''The directory that this tree manifest represents, including a
747 trailing '/'. Empty string for the repo root directory.'''
747 trailing '/'. Empty string for the repo root directory.'''
748 return self._dir
748 return self._dir
749
749
750 def node(self):
750 def node(self):
751 '''This node of this instance. nullid for unsaved instances. Should
751 '''This node of this instance. nullid for unsaved instances. Should
752 be updated when the instance is read or written from a revlog.
752 be updated when the instance is read or written from a revlog.
753 '''
753 '''
754 assert not self._dirty
754 assert not self._dirty
755 return self._node
755 return self._node
756
756
757 def setnode(self, node):
757 def setnode(self, node):
758 self._node = node
758 self._node = node
759 self._dirty = False
759 self._dirty = False
760
760
761 def iterentries(self):
761 def iterentries(self):
762 self._load()
762 self._load()
763 for p, n in sorted(self._dirs.items() + self._files.items()):
763 for p, n in sorted(self._dirs.items() + self._files.items()):
764 if p in self._files:
764 if p in self._files:
765 yield self._subpath(p), n, self._flags.get(p, '')
765 yield self._subpath(p), n, self._flags.get(p, '')
766 else:
766 else:
767 for x in n.iterentries():
767 for x in n.iterentries():
768 yield x
768 yield x
769
769
770 def iteritems(self):
770 def iteritems(self):
771 self._load()
771 self._load()
772 for p, n in sorted(self._dirs.items() + self._files.items()):
772 for p, n in sorted(self._dirs.items() + self._files.items()):
773 if p in self._files:
773 if p in self._files:
774 yield self._subpath(p), n
774 yield self._subpath(p), n
775 else:
775 else:
776 for f, sn in n.iteritems():
776 for f, sn in n.iteritems():
777 yield f, sn
777 yield f, sn
778
778
779 def iterkeys(self):
779 def iterkeys(self):
780 self._load()
780 self._load()
781 for p in sorted(self._dirs.keys() + self._files.keys()):
781 for p in sorted(self._dirs.keys() + self._files.keys()):
782 if p in self._files:
782 if p in self._files:
783 yield self._subpath(p)
783 yield self._subpath(p)
784 else:
784 else:
785 for f in self._dirs[p].iterkeys():
785 for f in self._dirs[p].iterkeys():
786 yield f
786 yield f
787
787
788 def keys(self):
788 def keys(self):
789 return list(self.iterkeys())
789 return list(self.iterkeys())
790
790
791 def __iter__(self):
791 def __iter__(self):
792 return self.iterkeys()
792 return self.iterkeys()
793
793
794 def __contains__(self, f):
794 def __contains__(self, f):
795 if f is None:
795 if f is None:
796 return False
796 return False
797 self._load()
797 self._load()
798 dir, subpath = _splittopdir(f)
798 dir, subpath = _splittopdir(f)
799 if dir:
799 if dir:
800 if dir not in self._dirs:
800 if dir not in self._dirs:
801 return False
801 return False
802 return self._dirs[dir].__contains__(subpath)
802 return self._dirs[dir].__contains__(subpath)
803 else:
803 else:
804 return f in self._files
804 return f in self._files
805
805
806 def get(self, f, default=None):
806 def get(self, f, default=None):
807 self._load()
807 self._load()
808 dir, subpath = _splittopdir(f)
808 dir, subpath = _splittopdir(f)
809 if dir:
809 if dir:
810 if dir not in self._dirs:
810 if dir not in self._dirs:
811 return default
811 return default
812 return self._dirs[dir].get(subpath, default)
812 return self._dirs[dir].get(subpath, default)
813 else:
813 else:
814 return self._files.get(f, default)
814 return self._files.get(f, default)
815
815
816 def __getitem__(self, f):
816 def __getitem__(self, f):
817 self._load()
817 self._load()
818 dir, subpath = _splittopdir(f)
818 dir, subpath = _splittopdir(f)
819 if dir:
819 if dir:
820 return self._dirs[dir].__getitem__(subpath)
820 return self._dirs[dir].__getitem__(subpath)
821 else:
821 else:
822 return self._files[f]
822 return self._files[f]
823
823
824 def flags(self, f):
824 def flags(self, f):
825 self._load()
825 self._load()
826 dir, subpath = _splittopdir(f)
826 dir, subpath = _splittopdir(f)
827 if dir:
827 if dir:
828 if dir not in self._dirs:
828 if dir not in self._dirs:
829 return ''
829 return ''
830 return self._dirs[dir].flags(subpath)
830 return self._dirs[dir].flags(subpath)
831 else:
831 else:
832 if f in self._dirs:
832 if f in self._dirs:
833 return ''
833 return ''
834 return self._flags.get(f, '')
834 return self._flags.get(f, '')
835
835
836 def find(self, f):
836 def find(self, f):
837 self._load()
837 self._load()
838 dir, subpath = _splittopdir(f)
838 dir, subpath = _splittopdir(f)
839 if dir:
839 if dir:
840 return self._dirs[dir].find(subpath)
840 return self._dirs[dir].find(subpath)
841 else:
841 else:
842 return self._files[f], self._flags.get(f, '')
842 return self._files[f], self._flags.get(f, '')
843
843
844 def __delitem__(self, f):
844 def __delitem__(self, f):
845 self._load()
845 self._load()
846 dir, subpath = _splittopdir(f)
846 dir, subpath = _splittopdir(f)
847 if dir:
847 if dir:
848 self._dirs[dir].__delitem__(subpath)
848 self._dirs[dir].__delitem__(subpath)
849 # If the directory is now empty, remove it
849 # If the directory is now empty, remove it
850 if self._dirs[dir]._isempty():
850 if self._dirs[dir]._isempty():
851 del self._dirs[dir]
851 del self._dirs[dir]
852 else:
852 else:
853 del self._files[f]
853 del self._files[f]
854 if f in self._flags:
854 if f in self._flags:
855 del self._flags[f]
855 del self._flags[f]
856 self._dirty = True
856 self._dirty = True
857
857
858 def __setitem__(self, f, n):
858 def __setitem__(self, f, n):
859 assert n is not None
859 assert n is not None
860 self._load()
860 self._load()
861 dir, subpath = _splittopdir(f)
861 dir, subpath = _splittopdir(f)
862 if dir:
862 if dir:
863 if dir not in self._dirs:
863 if dir not in self._dirs:
864 self._dirs[dir] = treemanifest(self._subpath(dir))
864 self._dirs[dir] = treemanifest(self._subpath(dir))
865 self._dirs[dir].__setitem__(subpath, n)
865 self._dirs[dir].__setitem__(subpath, n)
866 else:
866 else:
867 self._files[f] = n[:21] # to match manifestdict's behavior
867 self._files[f] = n[:21] # to match manifestdict's behavior
868 self._dirty = True
868 self._dirty = True
869
869
    def _load(self):
        # Run the pending lazy-load or lazy-copy callback exactly once.
        # The callback is swapped out for the _noop sentinel *before* it
        # runs, so a reentrant call observes _noop and does nothing.
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
877
877
878 def setflag(self, f, flags):
878 def setflag(self, f, flags):
879 """Set the flags (symlink, executable) for path f."""
879 """Set the flags (symlink, executable) for path f."""
880 self._load()
880 self._load()
881 dir, subpath = _splittopdir(f)
881 dir, subpath = _splittopdir(f)
882 if dir:
882 if dir:
883 if dir not in self._dirs:
883 if dir not in self._dirs:
884 self._dirs[dir] = treemanifest(self._subpath(dir))
884 self._dirs[dir] = treemanifest(self._subpath(dir))
885 self._dirs[dir].setflag(subpath, flags)
885 self._dirs[dir].setflag(subpath, flags)
886 else:
886 else:
887 self._flags[f] = flags
887 self._flags[f] = flags
888 self._dirty = True
888 self._dirty = True
889
889
    def copy(self):
        '''Return a copy of this tree, deferring the content copy until
        first access when this tree itself is not loaded yet.'''
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                # materialize our contents into the copy *s*
                self._load()
                for d in self._dirs:
                    s._dirs[d] = self._dirs[d].copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: copy eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer until the copy is first accessed
                copy._copyfunc = _copyfunc
        else:
            # we are ourselves a pending copy; share the callback
            copy._copyfunc = self._copyfunc
        return copy
908
908
909 def filesnotin(self, m2):
909 def filesnotin(self, m2):
910 '''Set of files in this manifest that are not in the other'''
910 '''Set of files in this manifest that are not in the other'''
911 files = set()
911 files = set()
912 def _filesnotin(t1, t2):
912 def _filesnotin(t1, t2):
913 if t1._node == t2._node and not t1._dirty and not t2._dirty:
913 if t1._node == t2._node and not t1._dirty and not t2._dirty:
914 return
914 return
915 t1._load()
915 t1._load()
916 t2._load()
916 t2._load()
917 for d, m1 in t1._dirs.iteritems():
917 for d, m1 in t1._dirs.iteritems():
918 if d in t2._dirs:
918 if d in t2._dirs:
919 m2 = t2._dirs[d]
919 m2 = t2._dirs[d]
920 _filesnotin(m1, m2)
920 _filesnotin(m1, m2)
921 else:
921 else:
922 files.update(m1.iterkeys())
922 files.update(m1.iterkeys())
923
923
924 for fn in t1._files.iterkeys():
924 for fn in t1._files.iterkeys():
925 if fn not in t2._files:
925 if fn not in t2._files:
926 files.add(t1._subpath(fn))
926 files.add(t1._subpath(fn))
927
927
928 _filesnotin(self, m2)
928 _filesnotin(self, m2)
929 return files
929 return files
930
930
931 @propertycache
931 @propertycache
932 def _alldirs(self):
932 def _alldirs(self):
933 return util.dirs(self)
933 return util.dirs(self)
934
934
935 def dirs(self):
935 def dirs(self):
936 return self._alldirs
936 return self._alldirs
937
937
938 def hasdir(self, dir):
938 def hasdir(self, dir):
939 self._load()
939 self._load()
940 topdir, subdir = _splittopdir(dir)
940 topdir, subdir = _splittopdir(dir)
941 if topdir:
941 if topdir:
942 if topdir in self._dirs:
942 if topdir in self._dirs:
943 return self._dirs[topdir].hasdir(subdir)
943 return self._dirs[topdir].hasdir(subdir)
944 return False
944 return False
945 return (dir + '/') in self._dirs
945 return (dir + '/') in self._dirs
946
946
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            # no filtering requested: stream every file name
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything still in fset was asked for but never seen; report it
        # unless it names a directory that does exist in the manifest
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
975
975
976 def _walk(self, match):
976 def _walk(self, match):
977 '''Recursively generates matching file names for walk().'''
977 '''Recursively generates matching file names for walk().'''
978 if not match.visitdir(self._dir[:-1] or '.'):
978 if not match.visitdir(self._dir[:-1] or '.'):
979 return
979 return
980
980
981 # yield this dir's files and walk its submanifests
981 # yield this dir's files and walk its submanifests
982 self._load()
982 self._load()
983 for p in sorted(self._dirs.keys() + self._files.keys()):
983 for p in sorted(self._dirs.keys() + self._files.keys()):
984 if p in self._files:
984 if p in self._files:
985 fullp = self._subpath(p)
985 fullp = self._subpath(p)
986 if match(fullp):
986 if match(fullp):
987 yield fullp
987 yield fullp
988 else:
988 else:
989 for f in self._dirs[p]._walk(match):
989 for f in self._dirs[p]._walk(match):
990 yield f
990 yield f
991
991
992 def matches(self, match):
992 def matches(self, match):
993 '''generate a new manifest filtered by the match argument'''
993 '''generate a new manifest filtered by the match argument'''
994 if match.always():
994 if match.always():
995 return self.copy()
995 return self.copy()
996
996
997 return self._matches(match)
997 return self._matches(match)
998
998
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            # everything under this directory matches; copy wholesale
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            # nothing under this directory can match; return the empty tree
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1027
1027
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                # identical stored subtrees: nothing to report
                return
            t1._load()
            t2._load()
            # recurse into subtrees present on either side; an absent
            # side is represented by the shared empty tree
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
1074
1074
1075 def unmodifiedsince(self, m2):
1075 def unmodifiedsince(self, m2):
1076 return not self._dirty and not m2._dirty and self._node == m2._node
1076 return not self._dirty and not m2._dirty and self._node == m2._node
1077
1077
    def parse(self, text, readsubtree):
        # Populate this tree from serialized manifest data.  The entries may
        # come from a tree manifest (one directory level, subtrees marked
        # with the 't' flag) or from a legacy flat manifest whose file names
        # contain '/'.
        for f, n, fl in _parse(text):
            if fl == 't':
                # Subtree entry: load the child manifest via readsubtree.
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1097
1097
1098 def text(self, usemanifestv2=False):
1098 def text(self, usemanifestv2=False):
1099 """Get the full data of this manifest as a bytestring."""
1099 """Get the full data of this manifest as a bytestring."""
1100 self._load()
1100 self._load()
1101 return _text(self.iterentries(), usemanifestv2)
1101 return _text(self.iterentries(), usemanifestv2)
1102
1102
1103 def dirtext(self, usemanifestv2=False):
1103 def dirtext(self, usemanifestv2=False):
1104 """Get the full data of this directory as a bytestring. Make sure that
1104 """Get the full data of this directory as a bytestring. Make sure that
1105 any submanifests have been written first, so their nodeids are correct.
1105 any submanifests have been written first, so their nodeids are correct.
1106 """
1106 """
1107 self._load()
1107 self._load()
1108 flags = self.flags
1108 flags = self.flags
1109 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1109 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1110 files = [(f, self._files[f], flags(f)) for f in self._files]
1110 files = [(f, self._files[f], flags(f)) for f in self._files]
1111 return _text(sorted(dirs + files), usemanifestv2)
1111 return _text(sorted(dirs + files), usemanifestv2)
1112
1112
1113 def read(self, gettext, readsubtree):
1113 def read(self, gettext, readsubtree):
1114 def _load_for_read(s):
1114 def _load_for_read(s):
1115 s.parse(gettext(), readsubtree)
1115 s.parse(gettext(), readsubtree)
1116 s._dirty = False
1116 s._dirty = False
1117 self._loadfunc = _load_for_read
1117 self._loadfunc = _load_for_read
1118
1118
1119 def writesubtrees(self, m1, m2, writesubtree):
1119 def writesubtrees(self, m1, m2, writesubtree):
1120 self._load() # for consistency; should never have any effect here
1120 self._load() # for consistency; should never have any effect here
1121 m1._load()
1121 m1._load()
1122 m2._load()
1122 m2._load()
1123 emptytree = treemanifest()
1123 emptytree = treemanifest()
1124 for d, subm in self._dirs.iteritems():
1124 for d, subm in self._dirs.iteritems():
1125 subp1 = m1._dirs.get(d, emptytree)._node
1125 subp1 = m1._dirs.get(d, emptytree)._node
1126 subp2 = m2._dirs.get(d, emptytree)._node
1126 subp2 = m2._dirs.get(d, emptytree)._node
1127 if subp1 == revlog.nullid:
1127 if subp1 == revlog.nullid:
1128 subp1, subp2 = subp2, subp1
1128 subp1, subp2 = subp2, subp1
1129 writesubtree(subm, subp1, subp2)
1129 writesubtree(subm, subp1, subp2)
1130
1130
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)

        # Whether subdirectory manifests get their own revlogs on disk.
        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2

        # node -> array('c', fulltext) LRU cache of recently used manifests.
        self._fulltextcache = util.lrucachedict(cachesize)

        indexfile = "00manifest.i"
        if dir:
            # Subdirectory revlogs only exist for on-disk tree manifests.
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        # NOTE(review): checkambig is only enabled for subdirectory revlogs
        # (bool(dir)) — presumably to avoid file-stat ambiguity on rewrite;
        # confirm against revlog.revlog.__init__.
        super(manifestrevlog, self).__init__(opener, indexfile,
                                             checkambig=bool(dir))

    @property
    def fulltextcache(self):
        # Public read-only accessor for the fulltext LRU cache.
        return self._fulltextcache

    def clearcaches(self):
        # Drop revlog-level caches, the fulltext cache, and all cached
        # subdirectory revlogs (keeping only the root entry).
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, dir):
        # Return the manifestrevlog for subdirectory *dir* ('' means self),
        # creating and caching it on first use.
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
                                                    self._dirlogcache)
        return self._dirlogcache[dir]

    def add(self, m, transaction, link, p1, p2, added, removed, readtree=None):
        # Store manifest *m* as a new revision with parents p1/p2 and
        # changelog linkrev *link*; returns the new manifest node.
        # *added*/*removed* are the file lists changed relative to p1, used
        # for the fast delta path below.
        if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                assert readtree, "readtree must be set for treemanifest writes"
                m1 = readtree(self._dir, p1)
                m2 = readtree(self._dir, p2)
                n = self._addtree(m, transaction, link, m1, m2, readtree)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        # Tree writes (arraytext is None) are not cached as a fulltext.
        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2, readtree):
        # Recursively write tree manifest *m* (parents m1/m2), writing
        # changed subtrees first so their nodeids are known.
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None,
                       readtree=readtree)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # Double-check whether contents are unchanged to one parent
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1245
1245
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        self._repo = repo

        usetreemanifest = False

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        # Whether manifests are represented as tree manifests in memory.
        self._treeinmem = usetreemanifest

        # The legacy manifest object; also serves as the backing revlog.
        self._oldmanifest = repo._constructmanifest()
        self._revlog = self._oldmanifest

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}

        # We'll separate this into it's own cache once oldmanifest is no longer
        # used
        self._mancache = self._oldmanifest._mancache
        self._dirmancache[''] = self._mancache

        # A future patch makes this use the same config value as the existing
        # mancache
        self.cachesize = 4

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, dir, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        if node in self._dirmancache.get(dir, ()):
            cachemf = self._dirmancache[dir][node]
            # The old manifest may put non-ctx manifests in the cache, so
            # skip those since they don't implement the full api.
            if (isinstance(cachemf, manifestctx) or
                isinstance(cachemf, treemanifestctx)):
                return cachemf

        if dir:
            # Directory lookups are only meaningful for on-disk tree
            # manifests; flat manifests cannot address a subdirectory.
            if self._revlog._treeondisk:
                dirlog = self._revlog.dirlog(dir)
                if node not in dirlog.nodemap:
                    raise LookupError(node, dirlog.indexfile,
                                      _('no node'))
                m = treemanifestctx(self._repo, dir, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % dir)
        else:
            if node not in self._revlog.nodemap:
                raise LookupError(node, self._revlog.indexfile,
                                  _('no node'))
            if self._treeinmem:
                m = treemanifestctx(self._repo, '', node)
            else:
                m = manifestctx(self._repo, node)

        # Cache the ctx per directory, except for the null revision.
        if node != revlog.nullid:
            mancache = self._dirmancache.get(dir)
            if not mancache:
                mancache = util.lrucachedict(self.cachesize)
                self._dirmancache[dir] = mancache
            mancache[node] = m
        return m

    def clearcaches(self):
        # Drop all cached ctx objects and the underlying revlog caches.
        self._dirmancache.clear()
        self._revlog.clearcaches()
1328
1328
class memmanifestctx(object):
    """A mutable, in-memory flat manifest revision that has not yet been
    committed to the manifest revlog."""

    def __init__(self, repo):
        self._repo = repo
        self._manifestdict = manifestdict()

    def _revlog(self):
        """The manifest revlog this context will be written to."""
        return self._repo.manifestlog._revlog

    def new(self):
        """Create another empty in-memory manifest context."""
        return memmanifestctx(self._repo)

    def copy(self):
        """Duplicate this context, copying the manifest contents."""
        clone = memmanifestctx(self._repo)
        clone._manifestdict = self.read().copy()
        return clone

    def read(self):
        """Return the mutable manifestdict being assembled."""
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed):
        """Commit the manifest to the revlog; returns the new nodeid."""
        rl = self._revlog()
        return rl.add(self._manifestdict, transaction, link, p1, p2,
                      added, removed)
1351
1351
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, repo, node):
        self._repo = repo
        # Parsed manifestdict; populated lazily by read().
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        """Return the manifest revlog backing this context."""
        return self._repo.manifestlog._revlog

    def node(self):
        """Nodeid of this manifest revision."""
        return self._node

    def new(self):
        """Return an empty, writable in-memory manifest context."""
        return memmanifestctx(self._repo)

    def copy(self):
        """Return a mutable in-memory copy of this manifest revision."""
        memmf = memmanifestctx(self._repo)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        """Parse (and cache) the full manifest text; returns a manifestdict."""
        if not self._data:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                rl = self._revlog()
                text = rl.revision(self._node)
                # Refresh the fulltext cache while we have the text in hand.
                arraytext = array.array('c', text)
                rl._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        # Named 'rl' (matching readfast above) rather than 'revlog', which
        # previously shadowed the module-level 'revlog' import that this
        # class uses elsewhere (revlog.nullid / revlog.nullrev).
        rl = self._revlog()
        if rl._usemanifestv2:
            # Need to perform a slow delta
            r0 = rl.deltaparent(rl.rev(self._node))
            m0 = manifestctx(self._repo, rl.node(r0)).read()
            m1 = self.read()
            md = manifestdict()
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

        # Fast path: apply the stored delta to its base and parse the result.
        r = rl.rev(self._node)
        d = mdiff.patchtext(rl.revdiff(rl.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        """Look up *key* in the parsed manifest (delegates to
        manifestdict.find)."""
        return self.read().find(key)
1436
1436
class memtreemanifestctx(object):
    """A mutable, in-memory tree manifest revision that has not yet been
    committed to a revlog."""

    def __init__(self, repo, dir=''):
        self._repo = repo
        self._dir = dir
        self._treemanifest = treemanifest()

    def _revlog(self):
        """The root manifest revlog used for writing."""
        return self._repo.manifestlog._revlog

    def new(self, dir=''):
        """Create another empty in-memory context for directory *dir*."""
        return memtreemanifestctx(self._repo, dir=dir)

    def copy(self):
        """Duplicate this context, copying the tree manifest contents."""
        clone = memtreemanifestctx(self._repo, dir=self._dir)
        clone._treemanifest = self.read().copy()
        return clone

    def read(self):
        """Return the mutable treemanifest being assembled."""
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed):
        """Commit the tree manifest to the revlog; returns the new nodeid."""
        def readtree(dir, node):
            # Subtree reader needed by the revlog to load parent subtrees.
            return self._repo.manifestlog.get(dir, node).read()
        rl = self._revlog()
        return rl.add(self._treemanifest, transaction, link, p1, p2,
                      added, removed, readtree=readtree)
1462
1462
1463 class treemanifestctx(object):
1463 class treemanifestctx(object):
1464 def __init__(self, repo, dir, node):
1464 def __init__(self, repo, dir, node):
1465 self._repo = repo
1465 self._repo = repo
1466 self._dir = dir
1466 self._dir = dir
1467 self._data = None
1467 self._data = None
1468
1468
1469 self._node = node
1469 self._node = node
1470
1470
1471 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1471 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1472 # we can instantiate treemanifestctx objects for directories we don't
1472 # we can instantiate treemanifestctx objects for directories we don't
1473 # have on disk.
1473 # have on disk.
1474 #self.p1, self.p2 = revlog.parents(node)
1474 #self.p1, self.p2 = revlog.parents(node)
1475 #rev = revlog.rev(node)
1475 #rev = revlog.rev(node)
1476 #self.linkrev = revlog.linkrev(rev)
1476 #self.linkrev = revlog.linkrev(rev)
1477
1477
1478 def _revlog(self):
1478 def _revlog(self):
1479 return self._repo.manifestlog._revlog.dirlog(self._dir)
1479 return self._repo.manifestlog._revlog.dirlog(self._dir)
1480
1480
1481 def read(self):
1481 def read(self):
1482 if not self._data:
1482 if not self._data:
1483 rl = self._revlog()
1483 rl = self._revlog()
1484 if self._node == revlog.nullid:
1484 if self._node == revlog.nullid:
1485 self._data = treemanifest()
1485 self._data = treemanifest()
1486 elif rl._treeondisk:
1486 elif rl._treeondisk:
1487 m = treemanifest(dir=self._dir)
1487 m = treemanifest(dir=self._dir)
1488 def gettext():
1488 def gettext():
1489 return rl.revision(self._node)
1489 return rl.revision(self._node)
1490 def readsubtree(dir, subm):
1490 def readsubtree(dir, subm):
1491 return treemanifestctx(self._repo, dir, subm).read()
1491 return treemanifestctx(self._repo, dir, subm).read()
1492 m.read(gettext, readsubtree)
1492 m.read(gettext, readsubtree)
1493 m.setnode(self._node)
1493 m.setnode(self._node)
1494 self._data = m
1494 self._data = m
1495 else:
1495 else:
1496 text = rl.revision(self._node)
1496 text = rl.revision(self._node)
1497 arraytext = array.array('c', text)
1497 arraytext = array.array('c', text)
1498 rl.fulltextcache[self._node] = arraytext
1498 rl.fulltextcache[self._node] = arraytext
1499 self._data = treemanifest(dir=self._dir, text=text)
1499 self._data = treemanifest(dir=self._dir, text=text)
1500
1500
1501 return self._data
1501 return self._data
1502
1502
    def node(self):
        # Nodeid of this manifest revision.
        return self._node
1505
1505
1506 def new(self, dir=''):
1506 def new(self, dir=''):
1507 return memtreemanifestctx(self._repo, dir=dir)
1507 return memtreemanifestctx(self._repo, dir=dir)
1508
1508
1509 def copy(self):
1509 def copy(self):
1510 memmf = memtreemanifestctx(self._repo, dir=self._dir)
1510 memmf = memtreemanifestctx(self._repo, dir=self._dir)
1511 memmf._treemanifest = self.read().copy()
1511 memmf._treemanifest = self.read().copy()
1512 return memmf
1512 return memmf
1513
1513
1514 def readdelta(self, shallow=False):
1514 def readdelta(self, shallow=False):
1515 '''Returns a manifest containing just the entries that are present
1515 '''Returns a manifest containing just the entries that are present
1516 in this manifest, but not in its p1 manifest. This is efficient to read
1516 in this manifest, but not in its p1 manifest. This is efficient to read
1517 if the revlog delta is already p1.
1517 if the revlog delta is already p1.
1518
1518
1519 If `shallow` is True, this will read the delta for this directory,
1519 If `shallow` is True, this will read the delta for this directory,
1520 without recursively reading subdirectory manifests. Instead, any
1520 without recursively reading subdirectory manifests. Instead, any
1521 subdirectory entry will be reported as it appears in the manifest, i.e.
1521 subdirectory entry will be reported as it appears in the manifest, i.e.
1522 the subdirectory will be reported among files and distinguished only by
1522 the subdirectory will be reported among files and distinguished only by
1523 its 't' flag.
1523 its 't' flag.
1524 '''
1524 '''
1525 revlog = self._revlog()
1525 revlog = self._revlog()
1526 if shallow and not revlog._usemanifestv2:
1526 if shallow and not revlog._usemanifestv2:
1527 r = revlog.rev(self._node)
1527 r = revlog.rev(self._node)
1528 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1528 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1529 return manifestdict(d)
1529 return manifestdict(d)
1530 else:
1530 else:
1531 # Need to perform a slow delta
1531 # Need to perform a slow delta
1532 r0 = revlog.deltaparent(revlog.rev(self._node))
1532 r0 = revlog.deltaparent(revlog.rev(self._node))
1533 m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
1533 m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
1534 m1 = self.read()
1534 m1 = self.read()
1535 md = treemanifest(dir=self._dir)
1535 md = treemanifest(dir=self._dir)
1536 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1536 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1537 if n1:
1537 if n1:
1538 md[f] = n1
1538 md[f] = n1
1539 if fl1:
1539 if fl1:
1540 md.setflag(f, fl1)
1540 md.setflag(f, fl1)
1541 return md
1541 return md
1542
1542
1543 def readfast(self, shallow=False):
1543 def readfast(self, shallow=False):
1544 '''Calls either readdelta or read, based on which would be less work.
1544 '''Calls either readdelta or read, based on which would be less work.
1545 readdelta is called if the delta is against the p1, and therefore can be
1545 readdelta is called if the delta is against the p1, and therefore can be
1546 read quickly.
1546 read quickly.
1547
1547
1548 If `shallow` is True, it only returns the entries from this manifest,
1548 If `shallow` is True, it only returns the entries from this manifest,
1549 and not any submanifests.
1549 and not any submanifests.
1550 '''
1550 '''
1551 rl = self._revlog()
1551 rl = self._revlog()
1552 r = rl.rev(self._node)
1552 r = rl.rev(self._node)
1553 deltaparent = rl.deltaparent(r)
1553 deltaparent = rl.deltaparent(r)
1554 if (deltaparent != revlog.nullrev and
1554 if (deltaparent != revlog.nullrev and
1555 deltaparent in rl.parentrevs(r)):
1555 deltaparent in rl.parentrevs(r)):
1556 return self.readdelta(shallow=shallow)
1556 return self.readdelta(shallow=shallow)
1557
1557
1558 if shallow:
1558 if shallow:
1559 return manifestdict(rl.revision(self._node))
1559 return manifestdict(rl.revision(self._node))
1560 else:
1560 else:
1561 return self.read()
1561 return self.read()
1562
1562
1563 def find(self, key):
1563 def find(self, key):
1564 return self.read().find(key)
1564 return self.read().find(key)
1565
1565
1566 class manifest(manifestrevlog):
1566 class manifest(manifestrevlog):
1567 def __init__(self, opener, dir='', dirlogcache=None):
1567 def __init__(self, opener, dir='', dirlogcache=None):
1568 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1568 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1569 manifest.manifest only. External users should create a root manifest
1569 manifest.manifest only. External users should create a root manifest
1570 log with manifest.manifest(opener) and call dirlog() on it.
1570 log with manifest.manifest(opener) and call dirlog() on it.
1571 '''
1571 '''
1572 # During normal operations, we expect to deal with not more than four
1572 # During normal operations, we expect to deal with not more than four
1573 # revs at a time (such as during commit --amend). When rebasing large
1573 # revs at a time (such as during commit --amend). When rebasing large
1574 # stacks of commits, the number can go up, hence the config knob below.
1574 # stacks of commits, the number can go up, hence the config knob below.
1575 cachesize = 4
1575 cachesize = 4
1576 usetreemanifest = False
1576 usetreemanifest = False
1577 opts = getattr(opener, 'options', None)
1577 opts = getattr(opener, 'options', None)
1578 if opts is not None:
1578 if opts is not None:
1579 cachesize = opts.get('manifestcachesize', cachesize)
1579 cachesize = opts.get('manifestcachesize', cachesize)
1580 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1580 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1581 self._mancache = util.lrucachedict(cachesize)
1581 self._mancache = util.lrucachedict(cachesize)
1582 self._treeinmem = usetreemanifest
1582 self._treeinmem = usetreemanifest
1583 super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)
1583 super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)
1584
1585 def _newmanifest(self, data=''):
1586 if self._treeinmem:
1587 return treemanifest(self._dir, data)
1588 return manifestdict(data)
1589
1590 def dirlog(self, dir):
1591 """This overrides the base revlog implementation to allow construction
1592 'manifest' types instead of manifestrevlog types. This is only needed
1593 until we migrate off the 'manifest' type."""
1594 if dir:
1595 assert self._treeondisk
1596 if dir not in self._dirlogcache:
1597 self._dirlogcache[dir] = manifest(self.opener, dir,
1598 self._dirlogcache)
1599 return self._dirlogcache[dir]
@@ -1,362 +1,362
1 # repair.py - functions for repository repair for mercurial
1 # repair.py - functions for repository repair for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 # Copyright 2007 Matt Mackall
4 # Copyright 2007 Matt Mackall
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import hashlib
12 import hashlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from .node import short
15 from .node import short
16 from . import (
16 from . import (
17 bundle2,
17 bundle2,
18 changegroup,
18 changegroup,
19 error,
19 error,
20 exchange,
20 exchange,
21 obsolete,
21 obsolete,
22 util,
22 util,
23 )
23 )
24
24
25 def _bundle(repo, bases, heads, node, suffix, compress=True):
25 def _bundle(repo, bases, heads, node, suffix, compress=True):
26 """create a bundle with the specified revisions as a backup"""
26 """create a bundle with the specified revisions as a backup"""
27 cgversion = changegroup.safeversion(repo)
27 cgversion = changegroup.safeversion(repo)
28
28
29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
29 cg = changegroup.changegroupsubset(repo, bases, heads, 'strip',
30 version=cgversion)
30 version=cgversion)
31 backupdir = "strip-backup"
31 backupdir = "strip-backup"
32 vfs = repo.vfs
32 vfs = repo.vfs
33 if not vfs.isdir(backupdir):
33 if not vfs.isdir(backupdir):
34 vfs.mkdir(backupdir)
34 vfs.mkdir(backupdir)
35
35
36 # Include a hash of all the nodes in the filename for uniqueness
36 # Include a hash of all the nodes in the filename for uniqueness
37 allcommits = repo.set('%ln::%ln', bases, heads)
37 allcommits = repo.set('%ln::%ln', bases, heads)
38 allhashes = sorted(c.hex() for c in allcommits)
38 allhashes = sorted(c.hex() for c in allcommits)
39 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
39 totalhash = hashlib.sha1(''.join(allhashes)).hexdigest()
40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
40 name = "%s/%s-%s-%s.hg" % (backupdir, short(node), totalhash[:8], suffix)
41
41
42 comp = None
42 comp = None
43 if cgversion != '01':
43 if cgversion != '01':
44 bundletype = "HG20"
44 bundletype = "HG20"
45 if compress:
45 if compress:
46 comp = 'BZ'
46 comp = 'BZ'
47 elif compress:
47 elif compress:
48 bundletype = "HG10BZ"
48 bundletype = "HG10BZ"
49 else:
49 else:
50 bundletype = "HG10UN"
50 bundletype = "HG10UN"
51 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
51 return bundle2.writebundle(repo.ui, cg, name, bundletype, vfs,
52 compression=comp)
52 compression=comp)
53
53
54 def _collectfiles(repo, striprev):
54 def _collectfiles(repo, striprev):
55 """find out the filelogs affected by the strip"""
55 """find out the filelogs affected by the strip"""
56 files = set()
56 files = set()
57
57
58 for x in xrange(striprev, len(repo)):
58 for x in xrange(striprev, len(repo)):
59 files.update(repo[x].files())
59 files.update(repo[x].files())
60
60
61 return sorted(files)
61 return sorted(files)
62
62
63 def _collectbrokencsets(repo, files, striprev):
63 def _collectbrokencsets(repo, files, striprev):
64 """return the changesets which will be broken by the truncation"""
64 """return the changesets which will be broken by the truncation"""
65 s = set()
65 s = set()
66 def collectone(revlog):
66 def collectone(revlog):
67 _, brokenset = revlog.getstrippoint(striprev)
67 _, brokenset = revlog.getstrippoint(striprev)
68 s.update([revlog.linkrev(r) for r in brokenset])
68 s.update([revlog.linkrev(r) for r in brokenset])
69
69
70 collectone(repo.manifest)
70 collectone(repo.manifest)
71 for fname in files:
71 for fname in files:
72 collectone(repo.file(fname))
72 collectone(repo.file(fname))
73
73
74 return s
74 return s
75
75
76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
76 def strip(ui, repo, nodelist, backup=True, topic='backup'):
77 # This function operates within a transaction of its own, but does
77 # This function operates within a transaction of its own, but does
78 # not take any lock on the repo.
78 # not take any lock on the repo.
79 # Simple way to maintain backwards compatibility for this
79 # Simple way to maintain backwards compatibility for this
80 # argument.
80 # argument.
81 if backup in ['none', 'strip']:
81 if backup in ['none', 'strip']:
82 backup = False
82 backup = False
83
83
84 repo = repo.unfiltered()
84 repo = repo.unfiltered()
85 repo.destroying()
85 repo.destroying()
86
86
87 cl = repo.changelog
87 cl = repo.changelog
88 # TODO handle undo of merge sets
88 # TODO handle undo of merge sets
89 if isinstance(nodelist, str):
89 if isinstance(nodelist, str):
90 nodelist = [nodelist]
90 nodelist = [nodelist]
91 striplist = [cl.rev(node) for node in nodelist]
91 striplist = [cl.rev(node) for node in nodelist]
92 striprev = min(striplist)
92 striprev = min(striplist)
93
93
94 # Some revisions with rev > striprev may not be descendants of striprev.
94 # Some revisions with rev > striprev may not be descendants of striprev.
95 # We have to find these revisions and put them in a bundle, so that
95 # We have to find these revisions and put them in a bundle, so that
96 # we can restore them after the truncations.
96 # we can restore them after the truncations.
97 # To create the bundle we use repo.changegroupsubset which requires
97 # To create the bundle we use repo.changegroupsubset which requires
98 # the list of heads and bases of the set of interesting revisions.
98 # the list of heads and bases of the set of interesting revisions.
99 # (head = revision in the set that has no descendant in the set;
99 # (head = revision in the set that has no descendant in the set;
100 # base = revision in the set that has no ancestor in the set)
100 # base = revision in the set that has no ancestor in the set)
101 tostrip = set(striplist)
101 tostrip = set(striplist)
102 for rev in striplist:
102 for rev in striplist:
103 for desc in cl.descendants([rev]):
103 for desc in cl.descendants([rev]):
104 tostrip.add(desc)
104 tostrip.add(desc)
105
105
106 files = _collectfiles(repo, striprev)
106 files = _collectfiles(repo, striprev)
107 saverevs = _collectbrokencsets(repo, files, striprev)
107 saverevs = _collectbrokencsets(repo, files, striprev)
108
108
109 # compute heads
109 # compute heads
110 saveheads = set(saverevs)
110 saveheads = set(saverevs)
111 for r in xrange(striprev + 1, len(cl)):
111 for r in xrange(striprev + 1, len(cl)):
112 if r not in tostrip:
112 if r not in tostrip:
113 saverevs.add(r)
113 saverevs.add(r)
114 saveheads.difference_update(cl.parentrevs(r))
114 saveheads.difference_update(cl.parentrevs(r))
115 saveheads.add(r)
115 saveheads.add(r)
116 saveheads = [cl.node(r) for r in saveheads]
116 saveheads = [cl.node(r) for r in saveheads]
117
117
118 # compute base nodes
118 # compute base nodes
119 if saverevs:
119 if saverevs:
120 descendants = set(cl.descendants(saverevs))
120 descendants = set(cl.descendants(saverevs))
121 saverevs.difference_update(descendants)
121 saverevs.difference_update(descendants)
122 savebases = [cl.node(r) for r in saverevs]
122 savebases = [cl.node(r) for r in saverevs]
123 stripbases = [cl.node(r) for r in tostrip]
123 stripbases = [cl.node(r) for r in tostrip]
124
124
125 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
125 # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)), but
126 # is much faster
126 # is much faster
127 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
127 newbmtarget = repo.revs('max(parents(%ld) - (%ld))', tostrip, tostrip)
128 if newbmtarget:
128 if newbmtarget:
129 newbmtarget = repo[newbmtarget.first()].node()
129 newbmtarget = repo[newbmtarget.first()].node()
130 else:
130 else:
131 newbmtarget = '.'
131 newbmtarget = '.'
132
132
133 bm = repo._bookmarks
133 bm = repo._bookmarks
134 updatebm = []
134 updatebm = []
135 for m in bm:
135 for m in bm:
136 rev = repo[bm[m]].rev()
136 rev = repo[bm[m]].rev()
137 if rev in tostrip:
137 if rev in tostrip:
138 updatebm.append(m)
138 updatebm.append(m)
139
139
140 # create a changegroup for all the branches we need to keep
140 # create a changegroup for all the branches we need to keep
141 backupfile = None
141 backupfile = None
142 vfs = repo.vfs
142 vfs = repo.vfs
143 node = nodelist[-1]
143 node = nodelist[-1]
144 if backup:
144 if backup:
145 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
145 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
146 repo.ui.status(_("saved backup bundle to %s\n") %
146 repo.ui.status(_("saved backup bundle to %s\n") %
147 vfs.join(backupfile))
147 vfs.join(backupfile))
148 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
148 repo.ui.log("backupbundle", "saved backup bundle to %s\n",
149 vfs.join(backupfile))
149 vfs.join(backupfile))
150 tmpbundlefile = None
150 tmpbundlefile = None
151 if saveheads:
151 if saveheads:
152 # do not compress temporary bundle if we remove it from disk later
152 # do not compress temporary bundle if we remove it from disk later
153 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
153 tmpbundlefile = _bundle(repo, savebases, saveheads, node, 'temp',
154 compress=False)
154 compress=False)
155
155
156 mfst = repo.manifest
156 mfst = repo.manifest
157
157
158 curtr = repo.currenttransaction()
158 curtr = repo.currenttransaction()
159 if curtr is not None:
159 if curtr is not None:
160 del curtr # avoid carrying reference to transaction for nothing
160 del curtr # avoid carrying reference to transaction for nothing
161 msg = _('programming error: cannot strip from inside a transaction')
161 msg = _('programming error: cannot strip from inside a transaction')
162 raise error.Abort(msg, hint=_('contact your extension maintainer'))
162 raise error.Abort(msg, hint=_('contact your extension maintainer'))
163
163
164 try:
164 try:
165 with repo.transaction("strip") as tr:
165 with repo.transaction("strip") as tr:
166 offset = len(tr.entries)
166 offset = len(tr.entries)
167
167
168 tr.startgroup()
168 tr.startgroup()
169 cl.strip(striprev, tr)
169 cl.strip(striprev, tr)
170 mfst.strip(striprev, tr)
170 mfst.strip(striprev, tr)
171 if 'treemanifest' in repo.requirements: # safe but unnecessary
171 if 'treemanifest' in repo.requirements: # safe but unnecessary
172 # otherwise
172 # otherwise
173 for unencoded, encoded, size in repo.store.datafiles():
173 for unencoded, encoded, size in repo.store.datafiles():
174 if (unencoded.startswith('meta/') and
174 if (unencoded.startswith('meta/') and
175 unencoded.endswith('00manifest.i')):
175 unencoded.endswith('00manifest.i')):
176 dir = unencoded[5:-12]
176 dir = unencoded[5:-12]
177 repo.manifest.dirlog(dir).strip(striprev, tr)
177 repo.manifestlog._revlog.dirlog(dir).strip(striprev, tr)
178 for fn in files:
178 for fn in files:
179 repo.file(fn).strip(striprev, tr)
179 repo.file(fn).strip(striprev, tr)
180 tr.endgroup()
180 tr.endgroup()
181
181
182 for i in xrange(offset, len(tr.entries)):
182 for i in xrange(offset, len(tr.entries)):
183 file, troffset, ignore = tr.entries[i]
183 file, troffset, ignore = tr.entries[i]
184 with repo.svfs(file, 'a', checkambig=True) as fp:
184 with repo.svfs(file, 'a', checkambig=True) as fp:
185 fp.truncate(troffset)
185 fp.truncate(troffset)
186 if troffset == 0:
186 if troffset == 0:
187 repo.store.markremoved(file)
187 repo.store.markremoved(file)
188
188
189 if tmpbundlefile:
189 if tmpbundlefile:
190 ui.note(_("adding branch\n"))
190 ui.note(_("adding branch\n"))
191 f = vfs.open(tmpbundlefile, "rb")
191 f = vfs.open(tmpbundlefile, "rb")
192 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
192 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
193 if not repo.ui.verbose:
193 if not repo.ui.verbose:
194 # silence internal shuffling chatter
194 # silence internal shuffling chatter
195 repo.ui.pushbuffer()
195 repo.ui.pushbuffer()
196 if isinstance(gen, bundle2.unbundle20):
196 if isinstance(gen, bundle2.unbundle20):
197 with repo.transaction('strip') as tr:
197 with repo.transaction('strip') as tr:
198 tr.hookargs = {'source': 'strip',
198 tr.hookargs = {'source': 'strip',
199 'url': 'bundle:' + vfs.join(tmpbundlefile)}
199 'url': 'bundle:' + vfs.join(tmpbundlefile)}
200 bundle2.applybundle(repo, gen, tr, source='strip',
200 bundle2.applybundle(repo, gen, tr, source='strip',
201 url='bundle:' + vfs.join(tmpbundlefile))
201 url='bundle:' + vfs.join(tmpbundlefile))
202 else:
202 else:
203 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
203 gen.apply(repo, 'strip', 'bundle:' + vfs.join(tmpbundlefile),
204 True)
204 True)
205 if not repo.ui.verbose:
205 if not repo.ui.verbose:
206 repo.ui.popbuffer()
206 repo.ui.popbuffer()
207 f.close()
207 f.close()
208 repo._phasecache.invalidate()
208 repo._phasecache.invalidate()
209
209
210 for m in updatebm:
210 for m in updatebm:
211 bm[m] = repo[newbmtarget].node()
211 bm[m] = repo[newbmtarget].node()
212 lock = tr = None
212 lock = tr = None
213 try:
213 try:
214 lock = repo.lock()
214 lock = repo.lock()
215 tr = repo.transaction('repair')
215 tr = repo.transaction('repair')
216 bm.recordchange(tr)
216 bm.recordchange(tr)
217 tr.close()
217 tr.close()
218 finally:
218 finally:
219 tr.release()
219 tr.release()
220 lock.release()
220 lock.release()
221
221
222 # remove undo files
222 # remove undo files
223 for undovfs, undofile in repo.undofiles():
223 for undovfs, undofile in repo.undofiles():
224 try:
224 try:
225 undovfs.unlink(undofile)
225 undovfs.unlink(undofile)
226 except OSError as e:
226 except OSError as e:
227 if e.errno != errno.ENOENT:
227 if e.errno != errno.ENOENT:
228 ui.warn(_('error removing %s: %s\n') %
228 ui.warn(_('error removing %s: %s\n') %
229 (undovfs.join(undofile), str(e)))
229 (undovfs.join(undofile), str(e)))
230
230
231 except: # re-raises
231 except: # re-raises
232 if backupfile:
232 if backupfile:
233 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
233 ui.warn(_("strip failed, backup bundle stored in '%s'\n")
234 % vfs.join(backupfile))
234 % vfs.join(backupfile))
235 if tmpbundlefile:
235 if tmpbundlefile:
236 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
236 ui.warn(_("strip failed, unrecovered changes stored in '%s'\n")
237 % vfs.join(tmpbundlefile))
237 % vfs.join(tmpbundlefile))
238 ui.warn(_("(fix the problem, then recover the changesets with "
238 ui.warn(_("(fix the problem, then recover the changesets with "
239 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
239 "\"hg unbundle '%s'\")\n") % vfs.join(tmpbundlefile))
240 raise
240 raise
241 else:
241 else:
242 if tmpbundlefile:
242 if tmpbundlefile:
243 # Remove temporary bundle only if there were no exceptions
243 # Remove temporary bundle only if there were no exceptions
244 vfs.unlink(tmpbundlefile)
244 vfs.unlink(tmpbundlefile)
245
245
246 repo.destroyed()
246 repo.destroyed()
247 # return the backup file path (or None if 'backup' was False) so
247 # return the backup file path (or None if 'backup' was False) so
248 # extensions can use it
248 # extensions can use it
249 return backupfile
249 return backupfile
250
250
251 def rebuildfncache(ui, repo):
251 def rebuildfncache(ui, repo):
252 """Rebuilds the fncache file from repo history.
252 """Rebuilds the fncache file from repo history.
253
253
254 Missing entries will be added. Extra entries will be removed.
254 Missing entries will be added. Extra entries will be removed.
255 """
255 """
256 repo = repo.unfiltered()
256 repo = repo.unfiltered()
257
257
258 if 'fncache' not in repo.requirements:
258 if 'fncache' not in repo.requirements:
259 ui.warn(_('(not rebuilding fncache because repository does not '
259 ui.warn(_('(not rebuilding fncache because repository does not '
260 'support fncache)\n'))
260 'support fncache)\n'))
261 return
261 return
262
262
263 with repo.lock():
263 with repo.lock():
264 fnc = repo.store.fncache
264 fnc = repo.store.fncache
265 # Trigger load of fncache.
265 # Trigger load of fncache.
266 if 'irrelevant' in fnc:
266 if 'irrelevant' in fnc:
267 pass
267 pass
268
268
269 oldentries = set(fnc.entries)
269 oldentries = set(fnc.entries)
270 newentries = set()
270 newentries = set()
271 seenfiles = set()
271 seenfiles = set()
272
272
273 repolen = len(repo)
273 repolen = len(repo)
274 for rev in repo:
274 for rev in repo:
275 ui.progress(_('rebuilding'), rev, total=repolen,
275 ui.progress(_('rebuilding'), rev, total=repolen,
276 unit=_('changesets'))
276 unit=_('changesets'))
277
277
278 ctx = repo[rev]
278 ctx = repo[rev]
279 for f in ctx.files():
279 for f in ctx.files():
280 # This is to minimize I/O.
280 # This is to minimize I/O.
281 if f in seenfiles:
281 if f in seenfiles:
282 continue
282 continue
283 seenfiles.add(f)
283 seenfiles.add(f)
284
284
285 i = 'data/%s.i' % f
285 i = 'data/%s.i' % f
286 d = 'data/%s.d' % f
286 d = 'data/%s.d' % f
287
287
288 if repo.store._exists(i):
288 if repo.store._exists(i):
289 newentries.add(i)
289 newentries.add(i)
290 if repo.store._exists(d):
290 if repo.store._exists(d):
291 newentries.add(d)
291 newentries.add(d)
292
292
293 ui.progress(_('rebuilding'), None)
293 ui.progress(_('rebuilding'), None)
294
294
295 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
295 if 'treemanifest' in repo.requirements: # safe but unnecessary otherwise
296 for dir in util.dirs(seenfiles):
296 for dir in util.dirs(seenfiles):
297 i = 'meta/%s/00manifest.i' % dir
297 i = 'meta/%s/00manifest.i' % dir
298 d = 'meta/%s/00manifest.d' % dir
298 d = 'meta/%s/00manifest.d' % dir
299
299
300 if repo.store._exists(i):
300 if repo.store._exists(i):
301 newentries.add(i)
301 newentries.add(i)
302 if repo.store._exists(d):
302 if repo.store._exists(d):
303 newentries.add(d)
303 newentries.add(d)
304
304
305 addcount = len(newentries - oldentries)
305 addcount = len(newentries - oldentries)
306 removecount = len(oldentries - newentries)
306 removecount = len(oldentries - newentries)
307 for p in sorted(oldentries - newentries):
307 for p in sorted(oldentries - newentries):
308 ui.write(_('removing %s\n') % p)
308 ui.write(_('removing %s\n') % p)
309 for p in sorted(newentries - oldentries):
309 for p in sorted(newentries - oldentries):
310 ui.write(_('adding %s\n') % p)
310 ui.write(_('adding %s\n') % p)
311
311
312 if addcount or removecount:
312 if addcount or removecount:
313 ui.write(_('%d items added, %d removed from fncache\n') %
313 ui.write(_('%d items added, %d removed from fncache\n') %
314 (addcount, removecount))
314 (addcount, removecount))
315 fnc.entries = newentries
315 fnc.entries = newentries
316 fnc._dirty = True
316 fnc._dirty = True
317
317
318 with repo.transaction('fncache') as tr:
318 with repo.transaction('fncache') as tr:
319 fnc.write(tr)
319 fnc.write(tr)
320 else:
320 else:
321 ui.write(_('fncache already up to date\n'))
321 ui.write(_('fncache already up to date\n'))
322
322
323 def stripbmrevset(repo, mark):
323 def stripbmrevset(repo, mark):
324 """
324 """
325 The revset to strip when strip is called with -B mark
325 The revset to strip when strip is called with -B mark
326
326
327 Needs to live here so extensions can use it and wrap it even when strip is
327 Needs to live here so extensions can use it and wrap it even when strip is
328 not enabled or not present on a box.
328 not enabled or not present on a box.
329 """
329 """
330 return repo.revs("ancestors(bookmark(%s)) - "
330 return repo.revs("ancestors(bookmark(%s)) - "
331 "ancestors(head() and not bookmark(%s)) - "
331 "ancestors(head() and not bookmark(%s)) - "
332 "ancestors(bookmark() and not bookmark(%s))",
332 "ancestors(bookmark() and not bookmark(%s))",
333 mark, mark, mark)
333 mark, mark, mark)
334
334
335 def deleteobsmarkers(obsstore, indices):
335 def deleteobsmarkers(obsstore, indices):
336 """Delete some obsmarkers from obsstore and return how many were deleted
336 """Delete some obsmarkers from obsstore and return how many were deleted
337
337
338 'indices' is a list of ints which are the indices
338 'indices' is a list of ints which are the indices
339 of the markers to be deleted.
339 of the markers to be deleted.
340
340
341 Every invocation of this function completely rewrites the obsstore file,
341 Every invocation of this function completely rewrites the obsstore file,
342 skipping the markers we want to be removed. The new temporary file is
342 skipping the markers we want to be removed. The new temporary file is
343 created, remaining markers are written there and on .close() this file
343 created, remaining markers are written there and on .close() this file
344 gets atomically renamed to obsstore, thus guaranteeing consistency."""
344 gets atomically renamed to obsstore, thus guaranteeing consistency."""
345 if not indices:
345 if not indices:
346 # we don't want to rewrite the obsstore with the same content
346 # we don't want to rewrite the obsstore with the same content
347 return
347 return
348
348
349 left = []
349 left = []
350 current = obsstore._all
350 current = obsstore._all
351 n = 0
351 n = 0
352 for i, m in enumerate(current):
352 for i, m in enumerate(current):
353 if i in indices:
353 if i in indices:
354 n += 1
354 n += 1
355 continue
355 continue
356 left.append(m)
356 left.append(m)
357
357
358 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
358 newobsstorefile = obsstore.svfs('obsstore', 'w', atomictemp=True)
359 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
359 for bytes in obsolete.encodemarkers(left, True, obsstore._version):
360 newobsstorefile.write(bytes)
360 newobsstorefile.write(bytes)
361 newobsstorefile.close()
361 newobsstorefile.close()
362 return n
362 return n
General Comments 0
You need to be logged in to leave comments. Login now