##// END OF EJS Templates
manifest: remove manifest.find...
Durham Goode -
r30340:608ba935 default
parent child Browse files
Show More
@@ -1,3574 +1,3577 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import sys
13 import sys
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 bin,
18 bin,
19 hex,
19 hex,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 )
23 )
24
24
25 from . import (
25 from . import (
26 bookmarks,
26 bookmarks,
27 changelog,
27 changelog,
28 copies,
28 copies,
29 crecord as crecordmod,
29 crecord as crecordmod,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 graphmod,
33 graphmod,
34 lock as lockmod,
34 lock as lockmod,
35 match as matchmod,
35 match as matchmod,
36 obsolete,
36 obsolete,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 repair,
40 repair,
41 revlog,
41 revlog,
42 revset,
42 revset,
43 scmutil,
43 scmutil,
44 templatekw,
44 templatekw,
45 templater,
45 templater,
46 util,
46 util,
47 )
47 )
48 stringio = util.stringio
48 stringio = util.stringio
49
49
def ishunk(x):
    """Report whether x is a record/crecord hunk object."""
    # Accept both the curses-based hunk type and the plain-text one.
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
53
53
def newandmodified(chunks, originalchunks):
    """Return the set of filenames of hunks that target newly-added
    files and are not present verbatim in originalchunks (i.e. were
    modified during the interactive selection)."""
    return set(c.header.filename()
               for c in chunks
               if ishunk(c) and c.header.isnewfile()
               and c not in originalchunks)
61
61
def parsealiases(cmd):
    """Split a command-table key into its list of aliases.

    Any leading '^' characters (marking a command as shown in short
    help) are discarded; the remainder is split on '|'.
    """
    names = cmd.lstrip("^")
    return names.split("|")
64
64
def setupwrapcolorwrite(ui):
    """Monkeypatch ui.write so diff output can be labeled/colorized.

    Returns the original ui.write so the caller can restore it later.
    """
    realwrite = ui.write

    def labeledwrite(*args, **kwargs):
        # Route every output chunk through patch.difflabel so each
        # piece carries the appropriate color label.
        baselabel = kwargs.pop('label', '')
        for chunk, sublabel in patch.difflabel(lambda: args):
            realwrite(chunk, label=baselabel + sublabel)

    setattr(ui, 'write', labeledwrite)
    return realwrite
77
77
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter originalhunks, via curses when requested.

    Returns whatever the underlying filterpatch implementation returns.
    """
    if not usecurses:
        # Plain text prompting path.
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # Test mode: drive the curses chunk selector from a script.
        chunkselector = crecordmod.testdecorator(
            testfile, crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, chunkselector)
90
90
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks and return a list of
    selected hunks (plus any option updates).

    *operation* is used to build ui messages indicating what kind of
    filtering is going on: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest', None)
    # Temporarily wrap ui.write for colorized diff output; always
    # restore the original, even if the selector raises.
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        ui.write = oldwrite
107
107
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
            filterfn, *pats, **opts):
    """Interactively select changes and commit them via commitfunc.

    cmdsuggest is the command name to suggest when the ui is not
    interactive; backupall backs up every changed file rather than only
    those touched by the selected hunks; filterfn is called as
    filterfn(ui, originalchunks) and returns (chunks, newopts).
    """
    from . import merge as mergemod
    # Interactive selection is impossible without a terminal; suggest
    # the non-interactive equivalent if we know one.
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # Diff options tuned for patch round-tripping: no dates, git
        # format (renames/modes), function context for readability.
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # Help-text pseudo-chunks have no files(); skip them.
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles
                        if f in modified or f in newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # The backup directory may survive from a prior run.
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # Serialize only the selected hunks of backed-up files into
            # one patch buffer.
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        extra={"suffix": ".diff"})
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # Best-effort cleanup: never let restore errors mask the
                # real outcome of the record operation.
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # Take the working-dir lock for the whole record operation.
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
288
288
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    normal = {}
    debug = {}

    # An exact table key short-circuits everything: the "log" alias
    # beats "^log|history".
    keys = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)

        hit = None
        if cmd in aliases:
            hit = cmd
        elif not strict:
            # Non-strict mode accepts the first alias that cmd prefixes.
            prefixed = [a for a in aliases if a.startswith(cmd)]
            if prefixed:
                hit = prefixed[0]

        if hit is None:
            continue
        isdebug = (aliases[0].startswith("debug")
                   or hit.startswith("debug"))
        bucket = debug if isdebug else normal
        bucket[hit] = (aliases, table[entry])

    if not normal and debug:
        normal = debug

    return normal, allcmds
326
326
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches, allcmds = findpossible(cmd, table, strict)

    # An exact match wins outright, even over other candidates.
    if cmd in matches:
        return matches[cmd]

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))

    if matches:
        return list(matches.values())[0]

    raise error.UnknownCommand(cmd, allcmds)
343
343
def findrepo(p):
    """Walk upward from p and return the first directory that contains
    a '.hg' subdirectory, or None when the filesystem root is reached
    without finding one."""
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # os.path.dirname is a fixed point at the root: no repo.
            return None
        p = parent
351
351
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True (the default), an outstanding uncommitted merge
    also aborts. Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'))
    # status()[:4] is (modified, added, removed, deleted).
    changes = repo.status()[:4]
    if any(changes):
        raise error.Abort(_('uncommitted changes'))
    wctx = repo[None]
    for sub in sorted(wctx.substate):
        wctx.sub(sub).bailifchanged()
361
361
def logmessage(ui, opts):
    """Get the log message according to the -m and -l options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if message or not logfile:
        return message

    # No -m, but a -l: pull the message from the log file (or stdin).
    try:
        if logfile == '-':
            return ui.fin.read()
        return '\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(_("can't read commit message '%s': %s") %
                          (logfile, inst.strerror))
380
380
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.

    Returns baseformname with '.merge' appended if it is a merge,
    otherwise with '.normal' appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        # A changeset with more than one parent is a merge.
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
397
397
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Get the appropriate commit message editor according to '--edit'.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before the empty-ness check. It should return the actual text to be
    stored into history, allowing the description to be rewritten
    before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line; the 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names distinguishing the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' when either
    'finishdesc' or 'extramsg' is specified, because they are specific
    to usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcededitor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcededitor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
428
428
def loglimit(opts):
    """Get the log limit according to option -l/--limit.

    Returns the limit as a positive int, or None when no (or a falsy)
    limit was given. Aborts on non-integer or non-positive values.
    """
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
442
442
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the format string pat into an output filename.

    Supported escapes (each only enabled when the data it needs was
    supplied):
      %%  literal '%'               %b  basename of repo root
      %H  full hex node             %h  short hex node
      %R  changelog revision        %r  rev zero-padded to revwidth
      %m  desc with non-word chars replaced by '_'
      %N  total                     %n  seqno (zero-padded when total
                                        is also known)
      %s  basename of pathname      %d  dirname of pathname (or '.')
      %p  pathname

    Raises error.Abort for an escape that is unknown or not enabled.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', str(desc))
    }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }

    try:
        # Fix: the original tested `if node:` twice in a row; the two
        # bodies are merged under a single check.
        if node:
            expander.update(node_expander)
            expander['r'] = (lambda:
                str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # With both known, pad seqno to the width of total.
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                # Consume the escape character and expand it.
                i += 1
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
488
488
489 class _unclosablefile(object):
489 class _unclosablefile(object):
490 def __init__(self, fp):
490 def __init__(self, fp):
491 self._fp = fp
491 self._fp = fp
492
492
493 def close(self):
493 def close(self):
494 pass
494 pass
495
495
496 def __iter__(self):
496 def __iter__(self):
497 return iter(self._fp)
497 return iter(self._fp)
498
498
499 def __getattr__(self, attr):
499 def __getattr__(self, attr):
500 return getattr(self._fp, attr)
500 return getattr(self._fp, attr)
501
501
502 def __enter__(self):
502 def __enter__(self):
503 return self
503 return self
504
504
505 def __exit__(self, exc_type, exc_value, exc_tb):
505 def __exit__(self, exc_type, exc_value, exc_tb):
506 pass
506 pass
507
507
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Return a file object for pat, which may be a format pattern,
    '-', or an already-open file.

    An empty pat or '-' yields stdout/stdin wrapped so that close() is
    a no-op. A file-like pat whose direction matches mode is returned
    unchanged. Otherwise pat is expanded via makefilename and opened;
    modemap lets successive calls append to a file the first call
    created.
    """
    writable = mode not in ('r', 'rb')

    # stdio requested: hand out an unclosable wrapper.
    if not pat or pat == '-':
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)

    # Already-open file object going in the right direction.
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # First writer truncates; subsequent writers append.
            modemap[fn] = 'ab'
    return open(fn, mode)
530
530
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # Validate the flag combination before touching anything.
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.manifest.dirlog(dir)
            # An empty dirlog means no such directory revlog exists.
            r = dirlog if len(dirlog) else None
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            r = filelog if len(filelog) else None

    if r:
        return r

    # Fall back to opening a raw revlog file on disk by name.
    if not file_:
        raise error.CommandError(cmd, _('invalid arguments'))
    if not os.path.isfile(file_):
        raise error.Abort(_("revlog '%s' not found") % file_)
    return revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                         file_[:-2] + ".i")
575
575
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, when rename=True, rename) working-directory files.

    The last element of ``pats`` is the destination; the rest are
    source patterns. Returns True if any source failed to copy (so the
    caller can report a non-zero exit), False otherwise.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect target collisions
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # Expand one pattern into a list of (abs, rel, exact) sources,
        # warning about (and skipping) unmanaged or removed files.
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # Copy a single file. Returns True to report a failure, None
        # otherwise (including when the copy is skipped with a warning).
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                # case-only rename (a => A): handled via a temp name below
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                if state in 'mn':
                    msg = _('%s: not overwriting - file already committed\n')
                    if after:
                        flags = '--after --force'
                    else:
                        flags = '--force'
                    if rename:
                        hint = _('(hg rename %s to replace the file by '
                                 'recording a rename)\n') % flags
                    else:
                        hint = _('(hg copy %s to replace the file by '
                                 'recording a copy)\n') % flags
                else:
                    msg = _('%s: not overwriting - file exists\n')
                    if rename:
                        hint = _('(hg rename --after to record the rename)\n')
                    else:
                        hint = _('(hg copy --after to record the copy)\n')
                ui.warn(msg % reltarget)
                ui.warn(hint)
                return

        if after:
            # --after only records the operation; the target must exist.
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # go through a temporary name so the case change sticks
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError as inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest with
                    # this strip length; used to pick the likelier layout
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise error.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise error.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.Abort(_('with multiple sources, destination must be an '
                                'existing directory'))
        if util.endswithsep(dest):
            raise error.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise error.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
821
821
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    Handles --daemon (re-exec detached and wait for the child to signal
    startup), --pid-file recording, and --daemon-postexec instructions
    (unlink:/chdir:/none) in the re-executed child. ``initfn`` runs
    before serving, ``runfn`` is the service body, and ``parentfn`` (if
    given) is called in the parent with the child's pid.
    '''

    def writepid(pid):
        # Record the process id in --pid-file (append when appendpid).
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_postexec']:
        # Parent side of daemonization: re-exec ourselves detached.
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-postexec=unlink:%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    # '--cwd VALUE' form: drop both the flag and its value
                    del runargs[i:i + 2]
                    break
            def condfn():
                # child signals readiness by unlinking the lock file
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise error.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(util.getpid())

    if opts['daemon_postexec']:
        # Child side of daemonization: detach from the controlling
        # terminal, run the postexec instructions, then redirect stdio.
        try:
            os.setsid()
        except AttributeError:
            # platform without setsid (e.g. Windows) -- TODO confirm
            pass
        for inst in opts['daemon_postexec']:
            if inst.startswith('unlink:'):
                lockpath = inst[7:]
                os.unlink(lockpath)
            elif inst.startswith('chdir:'):
                os.chdir(inst[6:])
            elif inst != 'none':
                raise error.Abort(_('invalid value for --daemon-postexec: %s')
                                  % inst)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # Redirect stdin to /dev/null and stdout/stderr to the logfile
        # (or /dev/null when no logfile is given).
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
908
908
## facility to let extension process additional data into an import patch
# list of identifiers to be executed in order
extrapreimport = [] # run before commit
extrapostimport = [] # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass an ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
929
929
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a status message, the new
    changeset node (or None), and whether hunks were rejected.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    # command-line options win over metadata embedded in the patch
    user = opts.get('user') or extractdata.get('user')
    date = opts.get('date') or extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    nocommit = opts.get('no_commit')
    importbranch = opts.get('import_branch')
    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # nothing extracted from the patch: nothing to do
        return (None, None, False)

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            # --exact requires the patch to carry full changeset metadata
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply to the working directory
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or importbranch:
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if nocommit:
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        # a partial import may legitimately commit nothing
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, user,
                                    date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            # --bypass: build the changeset in memory, leaving the working
            # directory untouched
            if opts.get('exact') or importbranch:
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            user,
                                            date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and nocommit:
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        msg = _('applied to working directory')
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
1099
1099
# Extension hook: lets extensions add extra header data to an exported patch.
# 'extraexport' is the ordered list of identifiers to run; 'extraexportmap'
# maps each identifier to a function taking (sequencenumber, changectx) and
# returning either a header string to append or None to add nothing.
extraexport = []
extraexportmap = {}
1107
1107
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.

    One patch is emitted per revision in revs. Output goes to fp when
    provided; otherwise each patch is written to a file named from
    template (or to the ui when the template is empty). Diff content is
    filtered through match and formatted according to opts.
    '''

    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    filemode = {}

    def single(rev, seqno, fp):
        # write one patch (header + description + diff) for a single rev
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        # diff against the first (possibly swapped) parent, or null
        prev = parents[0] if parents else nullid

        shouldclose = False
        if not fp and len(template) > 0:
            # a commit description always has at least one line
            desc = ctx.description().rstrip().split('\n')[0]
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            shouldclose = True
        if fp and not getattr(fp, 'name', '<unnamed>').startswith('<'):
            repo.ui.note("%s\n" % fp.name)

        if fp:
            def write(s, **kw):
                fp.write(s)
        else:
            write = repo.ui.write

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        # let extensions append their own "# ..." header lines
        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1172
1172
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes a diffstat summary when stat is true, a full diff otherwise.
    Output goes to fp when given, to the ui otherwise. When listsubrepos
    is true, subrepo diffs are appended as well.
    '''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    relroot = pathutil.canonpath(repo.root, repo.getcwd(), root) if root else ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn about match patterns that fall outside the relative root
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs zero context lines; honor terminal width if any
        diffopts = diffopts.copy(context=0)
        width = ui.termwidth() if not ui.plain() else 80
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.subdirmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1230
1230
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output, emitted in order by flush()
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        '''emit buffered output for ctx; return 1 if a hunk was written'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header.pop(rev)
            # avoid repeating an identical header for consecutive entries
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
        if rev in self.hunk:
            self.ui.write(self.hunk.pop(rev))
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        '''display ctx, buffering the output when requested'''
        if not self.buffered:
            self._show(ctx, copies, matchfn, props)
            return
        self.ui.pushbuffer(labeled=True)
        self._show(ctx, copies, matchfn, props)
        self.hunk[ctx.rev()] = self.ui.popbuffer()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        hexfunc = hex if self.ui.debugflag else short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for nsname, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified, added, removed relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        '''write the diffstat and/or patch for ctx if diffopts ask for it'''
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1().node()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")
1400
1400
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether the opening '[' still needs to be written
        self._first = True

    def close(self):
        # close the JSON array; emit an empty one if nothing was shown
        self.ui.write("[]\n" if self._first else "\n]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write(('\n  "rev": %s') % jrev)
            self.ui.write((',\n  "node": %s') % jnode)
            self.ui.write('\n }')
            return

        self.ui.write(('\n  "rev": %s') % jrev)
        self.ui.write((',\n  "node": %s') % jnode)
        self.ui.write((',\n  "branch": "%s"') % j(ctx.branch()))
        self.ui.write((',\n  "phase": "%s"') % ctx.phasestr())
        self.ui.write((',\n  "user": "%s"') % j(ctx.user()))
        self.ui.write((',\n  "date": [%d, %d]') % ctx.date())
        self.ui.write((',\n  "desc": "%s"') % j(ctx.description()))

        self.ui.write((',\n  "bookmarks": [%s]') %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write((',\n  "tags": [%s]') %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write((',\n  "parents": [%s]') %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            jmanifestnode = ('null' if rev is None
                             else '"%s"' % hex(ctx.manifestnode()))
            self.ui.write((',\n  "manifest": %s') % jmanifestnode)

            self.ui.write((',\n  "extra": {%s}') %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # (modified, added, removed, ...) relative to the first parent
            files = ctx.p1().status(ctx)
            self.ui.write((',\n  "modified": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write((',\n  "added": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write((',\n  "removed": [%s]') %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write((',\n  "files": [%s]') %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write((',\n  "copies": {%s}') %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture the diffstat output so it can be JSON-escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write((',\n  "diffstat": "%s"')
                              % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write((',\n  "diff": "%s"') % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1499
1499
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full nodes in debug mode, 12-char short form otherwise
        formatnode = (lambda x: x) if ui.debugflag else (lambda x: x[:12])
        filters = {'formatnode': formatnode}
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        # callers must provide either an inline template or a map file
        assert not (tmpl and mapfile)
        if mapfile:
            self.t = templater.templater.frommapfile(mapfile, filters=filters,
                                                     cache=defaulttempl)
        else:
            self.t = formatter.maketemplater(ui, 'changeset', tmpl,
                                             filters=filters,
                                             cache=defaulttempl)

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        # later (more specific) modes override earlier ones
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['ui'] = self.repo.ui
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # write header
        if self._parts['header']:
            h = templater.stringify(self.t(self._parts['header'], **props))
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                # only emit a header when it differs from the previous one
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts['changeset']
        self.ui.write(templater.stringify(self.t(key, **props)))
        self.showpatch(ctx, matchfn)

        if self._parts['footer']:
            if not self.footer:
                self.footer = templater.stringify(
                    self.t(self._parts['footer'], **props))
1584
1584
def gettemplate(ui, tmpl, style):
    """Return a (template, mapfile) pair for the given spec or style.

    Explicit arguments win over the [ui] configuration; a template is
    stronger than a style.
    """
    if not tmpl and not style: # template are stronger than style
        configured = ui.config('ui', 'logtemplate')
        if configured:
            return templater.unquotestring(configured), None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl:
        if style:
            mapfile = style
            # bare style names are resolved against the bundled
            # "map-cmdline.*" styles first, then as plain template paths
            if not os.path.split(mapfile)[0]:
                found = (templater.templatepath('map-cmdline.' + mapfile)
                         or templater.templatepath(mapfile))
                if found:
                    mapfile = found
            return None, mapfile
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1611
1611
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # --patch/--stat need a matcher covering the whole repo
    matchfn = (scmutil.matchall(repo)
               if opts.get('patch') or opts.get('stat') else None)

    # JSON output bypasses the template machinery entirely
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile, buffered)
1637
1637
def showmarker(fm, marker, index=None):
    """Write one obsolescence marker through formatter ``fm`` in a
    readable way.

    Intended for use by debug commands."""
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('precnode', '%s ', hex(marker.precnode()))
    successors = marker.succnodes()
    fm.condwrite(successors, 'succnodes', '%s ',
                 fm.formatlist([hex(n) for n in successors], name='node'))
    fm.write('flag', '%X ', marker.flags())
    parentnodes = marker.parentnodes()
    if parentnodes is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist([hex(n) for n in parentnodes],
                               name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date has its own column above; drop it from the metadata dict
    metadata = marker.metadata().copy()
    metadata.pop('date', None)
    fm.write('metadata', '{%s}',
             fm.formatdict(metadata, fmt='%r: %r', sep=', '))
    fm.plain('\n')
1658
1658
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matched = {}

    def prep(ctx, fns):
        # record every revision whose commit time satisfies the spec
        when = ctx.date()
        if datematch(when[0]):
            matched[ctx.rev()] = when

    for ctx in walkchangerevs(repo, scmutil.matchall(repo),
                              {'rev': None}, prep):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))
1679
1679
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes forever, doubling each step until the size has
    reached (or exceeded) sizelimit, after which it stays constant."""
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
1685
1685
class FileWalkError(Exception):
    """Raised when a file history cannot be walked via filelogs alone."""
1688
1688
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walk the file history for the matched files.

    Returns the set of changeset revs involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevgen(filelog, last):
        """Yield (linkrev, parentlinkrevs, copied) tuples, newest first,
        for filelog entries whose linkrevs fall in [minrev, maxrev]."""
        cl_count = len(repo)
        entries = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield revs for which we have the changelog; the filelog
            # can be ahead of it during "hg log" while pulling or committing
            if linkrev >= cl_count:
                break

            parentlinkrevs = [filelog.linkrev(p)
                              for p in filelog.parentrevs(j)
                              if p != nullrev]
            n = filelog.node(j)
            entries.append((linkrev, parentlinkrevs,
                            follow and filelog.renamed(n)))
        return reversed(entries)

    def iterfiles():
        # (filename, filenode) pairs: the matched files first, then any
        # copy sources discovered while walking
        pctx = repo['.']
        for filename in match.files():
            if not follow:
                yield filename, None
                continue
            if filename not in pctx:
                raise error.Abort(_('cannot follow file not in parent '
                                    'revision: "%s"') % filename)
            yield filename, pctx[filename].filenode()
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is not None:
                continue
            # A zero count may be a directory or deleted file, so
            # try to find matching entries on the slow path.
            if follow:
                raise error.Abort(
                    _('cannot follow nonexistent file: "%s"') % file_)
            raise FileWalkError("Cannot walk via filelog")

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # all known ancestors of the file, tracked as linkrevs
        ancestors = set([filelog.linkrev(last)])

        # iterate from the latest to the oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # "last" might not be the first interesting rev to us:
                # if the file was changed after maxrev, we'll have
                # linkrev(last) > maxrev, and the file graph still needs
                # to be explored
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1785
1785
class _followfilter(object):
    """Incrementally decide whether revisions belong to the "follow" set
    seeded by the first revision passed to match()."""

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev    # nullrev means "not seeded yet"
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # honor --follow-first by only ever considering first parents
            parentrevs = self.repo.changelog.parentrevs(rev)
            if self.onlyfirst:
                return parentrevs[0:1]
            return [p for p in parentrevs if p != nullrev]

        # the first revision seen seeds the filter and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: match all descendants of the start revision
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: match all ancestors
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1823
1823
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested.  Doing so naively has awful
    (quadratic-looking) performance, so revisions are walked in a
    "windowed" way: each window is first traversed forwards to gather
    data, then in the requested order (usually backwards) to yield it.

    Returns an iterator of contexts.  Before each context is yielded,
    the prepare function is first called on every context of the window
    in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step: fill "wanted", the set of revisions we want to yield.
    # When it does not induce extra cost, also fill "fncache", a cache of
    # filenames that were changed (ctx.files()) and that match the file
    # filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # Only the filelogs need reading to find wanted revisions
        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # Falling back to the slowpath because at least one of the
            # paths was not a file.  Check whether at least one of them
            # ever existed in history - otherwise simply return nothing.
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # The changelog must be read to match filenames against changed
        # files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # Locality is no concern here: membership is probed in the
            # same order as the increasing window below walks revisions.
            def __contains__(self, value):
                if value in self.set:
                    return True
                if value not in self.revs:
                    return False
                self.revs.discard(value)
                ctx = change(value)
                matches = [f for f in ctx.files() if match(f)]
                if matches:
                    fncache[value] = matches
                    self.set.add(value)
                    return True
                return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # "wanted" is now correctly initialized; iterate over the revision
    # range, yielding only revisions found in it.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            # gather up to windowsize wanted revisions
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # forward pass: let the caller gather data per revision
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # yield the window in the requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1961
1961
def _makefollowlogfilematcher(repo, files, followfirst):
    # With --patch --follow FILE we must know which file of each revision
    # to diff.  For --follow we want the names that FILE's ancestors carry
    # in every revision, so the graph traversal already performed by the
    # --follow revset is reproduced here, relating revs to file names
    # (which is not "correct" but good enough).
    revtofiles = {}
    populated = [False]    # mutable cell so the closure can flip it once
    pctx = repo['.']

    def populate():
        for fn in files:
            fctx = pctx[fn]
            revtofiles.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                revtofiles.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        # lazy initialization: walk the ancestry only on first use
        if not populated[0]:
            populated[0] = True
            populate()
        return scmutil.matchfiles(repo, revtofiles.get(rev, []))

    return filematcher
1989
1989
1990 def _makenofollowlogfilematcher(repo, pats, opts):
1990 def _makenofollowlogfilematcher(repo, pats, opts):
1991 '''hook for extensions to override the filematcher for non-follow cases'''
1991 '''hook for extensions to override the filematcher for non-follow cases'''
1992 return None
1992 return None
1993
1993
1994 def _makelogrevset(repo, pats, opts, revs):
1994 def _makelogrevset(repo, pats, opts, revs):
1995 """Return (expr, filematcher) where expr is a revset string built
1995 """Return (expr, filematcher) where expr is a revset string built
1996 from log options and file patterns or None. If --stat or --patch
1996 from log options and file patterns or None. If --stat or --patch
1997 are not passed filematcher is None. Otherwise it is a callable
1997 are not passed filematcher is None. Otherwise it is a callable
1998 taking a revision number and returning a match objects filtering
1998 taking a revision number and returning a match objects filtering
1999 the files to be detailed when displaying the revision.
1999 the files to be detailed when displaying the revision.
2000 """
2000 """
2001 opt2revset = {
2001 opt2revset = {
2002 'no_merges': ('not merge()', None),
2002 'no_merges': ('not merge()', None),
2003 'only_merges': ('merge()', None),
2003 'only_merges': ('merge()', None),
2004 '_ancestors': ('ancestors(%(val)s)', None),
2004 '_ancestors': ('ancestors(%(val)s)', None),
2005 '_fancestors': ('_firstancestors(%(val)s)', None),
2005 '_fancestors': ('_firstancestors(%(val)s)', None),
2006 '_descendants': ('descendants(%(val)s)', None),
2006 '_descendants': ('descendants(%(val)s)', None),
2007 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2007 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2008 '_matchfiles': ('_matchfiles(%(val)s)', None),
2008 '_matchfiles': ('_matchfiles(%(val)s)', None),
2009 'date': ('date(%(val)r)', None),
2009 'date': ('date(%(val)r)', None),
2010 'branch': ('branch(%(val)r)', ' or '),
2010 'branch': ('branch(%(val)r)', ' or '),
2011 '_patslog': ('filelog(%(val)r)', ' or '),
2011 '_patslog': ('filelog(%(val)r)', ' or '),
2012 '_patsfollow': ('follow(%(val)r)', ' or '),
2012 '_patsfollow': ('follow(%(val)r)', ' or '),
2013 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2013 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2014 'keyword': ('keyword(%(val)r)', ' or '),
2014 'keyword': ('keyword(%(val)r)', ' or '),
2015 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2015 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2016 'user': ('user(%(val)r)', ' or '),
2016 'user': ('user(%(val)r)', ' or '),
2017 }
2017 }
2018
2018
2019 opts = dict(opts)
2019 opts = dict(opts)
2020 # follow or not follow?
2020 # follow or not follow?
2021 follow = opts.get('follow') or opts.get('follow_first')
2021 follow = opts.get('follow') or opts.get('follow_first')
2022 if opts.get('follow_first'):
2022 if opts.get('follow_first'):
2023 followfirst = 1
2023 followfirst = 1
2024 else:
2024 else:
2025 followfirst = 0
2025 followfirst = 0
2026 # --follow with FILE behavior depends on revs...
2026 # --follow with FILE behavior depends on revs...
2027 it = iter(revs)
2027 it = iter(revs)
2028 startrev = next(it)
2028 startrev = next(it)
2029 followdescendants = startrev < next(it, startrev)
2029 followdescendants = startrev < next(it, startrev)
2030
2030
2031 # branch and only_branch are really aliases and must be handled at
2031 # branch and only_branch are really aliases and must be handled at
2032 # the same time
2032 # the same time
2033 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2033 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2034 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2034 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2035 # pats/include/exclude are passed to match.match() directly in
2035 # pats/include/exclude are passed to match.match() directly in
2036 # _matchfiles() revset but walkchangerevs() builds its matcher with
2036 # _matchfiles() revset but walkchangerevs() builds its matcher with
2037 # scmutil.match(). The difference is input pats are globbed on
2037 # scmutil.match(). The difference is input pats are globbed on
2038 # platforms without shell expansion (windows).
2038 # platforms without shell expansion (windows).
2039 wctx = repo[None]
2039 wctx = repo[None]
2040 match, pats = scmutil.matchandpats(wctx, pats, opts)
2040 match, pats = scmutil.matchandpats(wctx, pats, opts)
2041 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2041 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2042 opts.get('removed'))
2042 opts.get('removed'))
2043 if not slowpath:
2043 if not slowpath:
2044 for f in match.files():
2044 for f in match.files():
2045 if follow and f not in wctx:
2045 if follow and f not in wctx:
2046 # If the file exists, it may be a directory, so let it
2046 # If the file exists, it may be a directory, so let it
2047 # take the slow path.
2047 # take the slow path.
2048 if os.path.exists(repo.wjoin(f)):
2048 if os.path.exists(repo.wjoin(f)):
2049 slowpath = True
2049 slowpath = True
2050 continue
2050 continue
2051 else:
2051 else:
2052 raise error.Abort(_('cannot follow file not in parent '
2052 raise error.Abort(_('cannot follow file not in parent '
2053 'revision: "%s"') % f)
2053 'revision: "%s"') % f)
2054 filelog = repo.file(f)
2054 filelog = repo.file(f)
2055 if not filelog:
2055 if not filelog:
2056 # A zero count may be a directory or deleted file, so
2056 # A zero count may be a directory or deleted file, so
2057 # try to find matching entries on the slow path.
2057 # try to find matching entries on the slow path.
2058 if follow:
2058 if follow:
2059 raise error.Abort(
2059 raise error.Abort(
2060 _('cannot follow nonexistent file: "%s"') % f)
2060 _('cannot follow nonexistent file: "%s"') % f)
2061 slowpath = True
2061 slowpath = True
2062
2062
2063 # We decided to fall back to the slowpath because at least one
2063 # We decided to fall back to the slowpath because at least one
2064 # of the paths was not a file. Check to see if at least one of them
2064 # of the paths was not a file. Check to see if at least one of them
2065 # existed in history - in that case, we'll continue down the
2065 # existed in history - in that case, we'll continue down the
2066 # slowpath; otherwise, we can turn off the slowpath
2066 # slowpath; otherwise, we can turn off the slowpath
2067 if slowpath:
2067 if slowpath:
2068 for path in match.files():
2068 for path in match.files():
2069 if path == '.' or path in repo.store:
2069 if path == '.' or path in repo.store:
2070 break
2070 break
2071 else:
2071 else:
2072 slowpath = False
2072 slowpath = False
2073
2073
2074 fpats = ('_patsfollow', '_patsfollowfirst')
2074 fpats = ('_patsfollow', '_patsfollowfirst')
2075 fnopats = (('_ancestors', '_fancestors'),
2075 fnopats = (('_ancestors', '_fancestors'),
2076 ('_descendants', '_fdescendants'))
2076 ('_descendants', '_fdescendants'))
2077 if slowpath:
2077 if slowpath:
2078 # See walkchangerevs() slow path.
2078 # See walkchangerevs() slow path.
2079 #
2079 #
2080 # pats/include/exclude cannot be represented as separate
2080 # pats/include/exclude cannot be represented as separate
2081 # revset expressions as their filtering logic applies at file
2081 # revset expressions as their filtering logic applies at file
2082 # level. For instance "-I a -X a" matches a revision touching
2082 # level. For instance "-I a -X a" matches a revision touching
2083 # "a" and "b" while "file(a) and not file(b)" does
2083 # "a" and "b" while "file(a) and not file(b)" does
2084 # not. Besides, filesets are evaluated against the working
2084 # not. Besides, filesets are evaluated against the working
2085 # directory.
2085 # directory.
2086 matchargs = ['r:', 'd:relpath']
2086 matchargs = ['r:', 'd:relpath']
2087 for p in pats:
2087 for p in pats:
2088 matchargs.append('p:' + p)
2088 matchargs.append('p:' + p)
2089 for p in opts.get('include', []):
2089 for p in opts.get('include', []):
2090 matchargs.append('i:' + p)
2090 matchargs.append('i:' + p)
2091 for p in opts.get('exclude', []):
2091 for p in opts.get('exclude', []):
2092 matchargs.append('x:' + p)
2092 matchargs.append('x:' + p)
2093 matchargs = ','.join(('%r' % p) for p in matchargs)
2093 matchargs = ','.join(('%r' % p) for p in matchargs)
2094 opts['_matchfiles'] = matchargs
2094 opts['_matchfiles'] = matchargs
2095 if follow:
2095 if follow:
2096 opts[fnopats[0][followfirst]] = '.'
2096 opts[fnopats[0][followfirst]] = '.'
2097 else:
2097 else:
2098 if follow:
2098 if follow:
2099 if pats:
2099 if pats:
2100 # follow() revset interprets its file argument as a
2100 # follow() revset interprets its file argument as a
2101 # manifest entry, so use match.files(), not pats.
2101 # manifest entry, so use match.files(), not pats.
2102 opts[fpats[followfirst]] = list(match.files())
2102 opts[fpats[followfirst]] = list(match.files())
2103 else:
2103 else:
2104 op = fnopats[followdescendants][followfirst]
2104 op = fnopats[followdescendants][followfirst]
2105 opts[op] = 'rev(%d)' % startrev
2105 opts[op] = 'rev(%d)' % startrev
2106 else:
2106 else:
2107 opts['_patslog'] = list(pats)
2107 opts['_patslog'] = list(pats)
2108
2108
2109 filematcher = None
2109 filematcher = None
2110 if opts.get('patch') or opts.get('stat'):
2110 if opts.get('patch') or opts.get('stat'):
2111 # When following files, track renames via a special matcher.
2111 # When following files, track renames via a special matcher.
2112 # If we're forced to take the slowpath it means we're following
2112 # If we're forced to take the slowpath it means we're following
2113 # at least one pattern/directory, so don't bother with rename tracking.
2113 # at least one pattern/directory, so don't bother with rename tracking.
2114 if follow and not match.always() and not slowpath:
2114 if follow and not match.always() and not slowpath:
2115 # _makefollowlogfilematcher expects its files argument to be
2115 # _makefollowlogfilematcher expects its files argument to be
2116 # relative to the repo root, so use match.files(), not pats.
2116 # relative to the repo root, so use match.files(), not pats.
2117 filematcher = _makefollowlogfilematcher(repo, match.files(),
2117 filematcher = _makefollowlogfilematcher(repo, match.files(),
2118 followfirst)
2118 followfirst)
2119 else:
2119 else:
2120 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2120 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2121 if filematcher is None:
2121 if filematcher is None:
2122 filematcher = lambda rev: match
2122 filematcher = lambda rev: match
2123
2123
2124 expr = []
2124 expr = []
2125 for op, val in sorted(opts.iteritems()):
2125 for op, val in sorted(opts.iteritems()):
2126 if not val:
2126 if not val:
2127 continue
2127 continue
2128 if op not in opt2revset:
2128 if op not in opt2revset:
2129 continue
2129 continue
2130 revop, andor = opt2revset[op]
2130 revop, andor = opt2revset[op]
2131 if '%(val)' not in revop:
2131 if '%(val)' not in revop:
2132 expr.append(revop)
2132 expr.append(revop)
2133 else:
2133 else:
2134 if not isinstance(val, list):
2134 if not isinstance(val, list):
2135 e = revop % {'val': val}
2135 e = revop % {'val': val}
2136 else:
2136 else:
2137 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2137 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2138 expr.append(e)
2138 expr.append(e)
2139
2139
2140 if expr:
2140 if expr:
2141 expr = '(' + ' and '.join(expr) + ')'
2141 expr = '(' + ' and '.join(expr) + ')'
2142 else:
2142 else:
2143 expr = None
2143 expr = None
2144 return expr, filematcher
2144 return expr, filematcher
2145
2145
def _logrevs(repo, opts):
    """Return the initial revset to be filtered by the log machinery.

    The default --rev value depends on --follow, but --follow behavior
    in turn depends on the revisions resolved from --rev, which is why
    this resolution lives in its own helper.
    """
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        # Following from an unborn working-directory parent: nothing to show.
        revs = revset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        # Whole repo, newest first.
        revs = revset.spanset(repo)
        revs.reverse()
    return revs
2160
2160
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        if not (revs.isdescending() or revs.istopo()):
            revs.sort(reverse=True)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # Truncate lazily so we never force more than 'limit' revisions.
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2191
2191
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        matcher = revset.match(repo.ui, expr, order=revset.followorder)
        revs = matcher(repo, revs)
    if limit is not None:
        # Truncate lazily so we never force more than 'limit' revisions.
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2217
2217
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> str rendering the graph node symbol.

    Honors the 'ui.graphnodetemplate' config; when unset, falls back to the
    built-in {graphnode} keyword, which avoids the templater entirely.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        return templatekw.showgraphnode  # fast path for "{graphnode}"

    templ = formatter.gettemplater(ui, 'graphnode', spec)
    cache = {}
    if isinstance(displayer, changeset_templater):
        cache = displayer.cache  # reuse cache of slow templates
    props = templatekw.keywords.copy()
    props['templ'] = templ
    props['cache'] = cache
    def formatnode(repo, ctx):
        # 'props' is shared across calls; per-rev entries are overwritten
        # each time and 'revcache' is reset so keywords don't leak state.
        props['ctx'] = ctx
        props['repo'] = repo
        props['ui'] = repo.ui
        props['revcache'] = {}
        return templater.stringify(templ('graphnode', **props))
    return formatnode
2237
2237
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None):
    """Render an ASCII revision graph for 'dag' through 'displayer'.

    dag yields (rev, type, ctx, parents) tuples; edgefn maps each node's
    output lines onto graph columns; getrenamed, when given, is used to
    annotate copies; filematcher narrows which files each rev displays.
    """
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the null/working rev; skip rename lookup.
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        # The displayer buffered its output under 'rev'; retrieve it and
        # drop a trailing empty line so the graph columns line up.
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, state, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2285
2285
def graphlog(ui, repo, *pats, **opts):
    """Run 'hg log -G': show revision history alongside an ASCII graph.

    Parameters are identical to log command ones.
    """
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            # Bound the copy tracing at the newest requested rev.
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2300
2300
def checkunsupportedgraphflags(pats, opts):
    """Abort if any option incompatible with -G/--graph was supplied.

    Raises error.Abort naming the offending option (with dashes restored).
    """
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))
2306
2306
def graphrevs(repo, nodes, opts):
    """Return a graphmod node iterator over 'nodes', newest first.

    Applies the --limit option (via loglimit) after reversing the list;
    note that 'nodes' is reversed in place.
    """
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)
2313
2313
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matching 'match' for addition, recursing into subrepos.

    Returns the list of files that could not be added. 'prefix' is the
    path prefix used for messages and subrepo recursion; when
    'explicitonly' is true only exactly-named files are considered.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []

    # Record files the matcher rejects while still delegating to the
    # matcher's own bad-file reporting.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                # Check for case collisions before accepting the file.
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                # Without --subrepos only explicitly-named files are added.
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2356
2356
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking matched files without deleting them from disk.

    Recurses into subrepositories. Returns (bad, forgot): files that could
    not be forgotten and files actually forgotten, both prefixed with their
    subrepo path where applicable.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Record files the matcher rejects while still delegating to the
    # matcher's own bad-file reporting.
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    # modified + added + deleted + clean == everything currently tracked.
    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2404
2404
def files(ui, ctx, m, fm, fmt, subrepos):
    """List the files of 'ctx' matching 'm' through formatter 'fm'.

    'fmt' is the format string for each path; 'subrepos' forces recursion
    into all subrepositories. Returns 0 if anything was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # In the working directory (rev is None), skip files marked removed.
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2434
2434
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Implement 'hg remove': untrack matched files, unlinking unless 'after'.

    'force' removes regardless of local modifications; 'after' only records
    already-deleted files; 'subrepos' recurses into all subrepositories.
    When 'warnings' is supplied (recursive call), messages are appended to
    it instead of being printed here. Returns 1 if anything was skipped or
    failed, 0 otherwise.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        # Only the outermost call flushes the accumulated warnings.
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    ui.progress(_('deleting'), None)

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2552
2552
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write out the data of the files in ctx selected by matcher.

    Returns 0 if at least one file was written, 1 otherwise. Subrepos
    matching the matcher are recursed into; missing ones are skipped
    with a status message.
    """
    err = 1

    def write(path):
        # Open the output sink (file or pattern-expanded name) and dump
        # the file's data, optionally filtered through decode filters.
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Fast path: automation often cats a single literal file, so avoid
    # the cost of parsing the full manifest in that case.
    if len(matcher.files()) == 1 and not matcher.anypats():
        fname = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(fname)[0]:
                write(fname)
                return 0
        except KeyError:
            # file not in this manifest; fall through to the slow path
            pass

    for abspath in ctx.walk(matcher):
        write(abspath)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2592
2595
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    when = opts.get('date')
    if when:
        opts['date'] = util.parsedate(when)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # addremove is read defensively -- this function can be called from a
    # command that doesn't support the addremove option at all.
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "",
                                                   opts) != 0:
        raise error.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2609
2612
def samefile(f, ctx1, ctx2):
    """Return True if file f is identical in both contexts.

    "Identical" means same content and same flags; a file missing from
    both manifests also counts as identical.
    """
    if f not in ctx1.manifest():
        # equal only if it is absent from ctx2 as well
        return f not in ctx2.manifest()
    if f not in ctx2.manifest():
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2621
2624
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Amend changeset `old` with the working-directory changes.

    Commits an intermediate changeset holding the wdir changes, then
    recreates `old` on top of its first parent with those changes folded
    in. Returns the node of the amended changeset (or old.node() if
    nothing changed). Either obsolescence markers are created or the
    replaced changesets are stripped, depending on configuration.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()
    createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        with repo.transaction('amend') as tr:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._bookmarks.active
            try:
                # deactivate the bookmark so the temporary commit does not
                # move it; restored (and recorded) in the finally below
                repo._bookmarks.active = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._bookmarks.active = activebookmark
                repo._bookmarks.recordchange(tr)
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            # |          from working dir to go into amending commit
            # |          (or a workingctx if there were no changes)
            # |
            # old      o - changeset to amend
            # |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # NOTE(review): 'old.p2' is a bound method and therefore
                # always truthy -- this branch is always taken. Probably
                # meant to test old.p2() against the null revision; confirm
                # whether pathcopies against a null p2 is harmless here.
                if old.p2:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())
                files = [f for f in files if not samefile(f, ctx, base)]

                def filectxfn(repo, ctx_, path):
                    # serve file versions from the intermediate commit;
                    # None signals "file removed" to memctx
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                # no message given: edit the old description interactively
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            # snapshot extra before adding amend_source noise, so the
            # no-op detection below compares like with like
            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This not what we expect from amend.
                return old.node()

            # temporarily steer the new-commit phase so the amended commit
            # keeps old's phase (or becomes secret on request)
            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        ui.debug('moving bookmarks %r from %s to %s\n' %
                                 (marks, old.hex(), hex(newid)))
                        marks[bm] = newid
                    marks.recordchange(tr)
        #commit the whole amend process
        if createmarkers:
            # mark the new changeset as successor of the rewritten one
            new = repo[newid]
            obs = [(old, (new,))]
            if node:
                obs.append((ctx, ()))

            obsolete.createmarkers(repo, obs)
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, wlock)
    return newid
2804
2807
def commiteditor(repo, ctx, subs, editform=''):
    """Return ctx's description, invoking the editor only when empty.

    When the editor is used, an unchanged message aborts the commit.
    """
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(repo, ctx, subs, editform=editform,
                             unchangedmessagedetection=True)
2810
2813
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform='', unchangedmessagedetection=False):
    """Run the user's editor to produce a commit message for ctx.

    finishdesc: optional callable applied to the stripped text.
    extramsg: extra instruction line shown in the editor skeleton.
    unchangedmessagedetection: abort if the user saved the template as-is.
    Raises error.Abort on an empty or (optionally) unchanged message.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # look up the most specific committemplate.<editform...> config entry,
    # falling back component by component to plain 'changeset'
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    templatetext = None
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        # no template configured: use the hand-built default skeleton
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                              editform=editform, pending=pending)
    # strip the HG: helper lines before using the text
    text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_("empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_("commit message unchanged"))

    return text
2851
2854
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the configured commit-editor template for ctx and return it."""
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)

    # register every committemplate.* item (except the main 'changeset'
    # entry) so sub-templates can be referenced by name
    for key, value in repo.ui.configitems('committemplate'):
        if key != 'changeset':
            t.t.cache[key] = value

    # ensure that extramsg is a string
    extramsg = extramsg or ''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2868
2871
def hgprefix(msg):
    """Prefix every non-empty line of msg with "HG: ", dropping blank lines."""
    lines = msg.split("\n")
    return "\n".join("HG: %s" % line for line in lines if line)
2871
2874
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default (non-templated) commit editor skeleton for ctx."""
    edittext = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append("")
    edittext.append("") # Empty line between message and comments.
    edittext.append(hgprefix(_("Enter commit message."
                               " Lines beginning with 'HG:' are removed.")))
    edittext.append(hgprefix(extramsg))
    edittext.append("HG: --")
    edittext.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_("branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    edittext.extend(hgprefix(_("subrepo %s") % s) for s in subs)
    edittext.extend(hgprefix(_("added %s") % f) for f in added)
    edittext.extend(hgprefix(_("changed %s") % f) for f in modified)
    edittext.extend(hgprefix(_("removed %s") % f) for f in removed)
    if not (added or modified or removed):
        edittext.append(hgprefix(_("no files changed")))
    edittext.append("")

    return "\n".join(edittext)
2899
2902
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Report on a freshly committed changeset.

    Prints 'created new head' when appropriate, notes reopened closed
    branch heads, and echoes the committed changeset id when the ui is
    verbose or debugging.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if not opts.get('amend') and bheads and node not in bheads:
        # a new head was created unless some parent was already a head of
        # this branch (the usual linear-commit case).
        parentheads = [p for p in parents
                       if p.node() in bheads and p.branch() == branch]
        if not parentheads:
            repo.ui.status(_('created new head\n'))
    # The message is not printed for initial roots. For the other
    # changesets, it is printed in the following situations:
    #
    # Par column: for the 2 parents with ...
    #   N: null or no parent
    #   B: parent is on another named branch
    #   C: parent is a regular non head changeset
    #   H: parent was a branch head of the current branch
    # Msg column: whether we print "created new head" message
    # In the following, it is assumed that there already exists some
    # initial branch heads of the current branch, otherwise nothing is
    # printed anyway.
    #
    # Par Msg Comment
    # N N  y  additional topo root
    #
    # B N  y  additional branch root
    # C N  y  additional topo head
    # H N  n  usual case
    #
    # B B  y  weird additional branch root
    # C B  y  branch merge
    # H B  n  merge with named branch
    #
    # C C  y  additional head from merge
    # C H  n  merge with a head
    #
    # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for p in parents:
            if p.closesbranch() and p.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n') % p)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2947
2950
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status restricted to pats/opts."""
    matcher = scmutil.match(repo[None], pats, opts)
    return repo.status(match=matcher)
2950
2953
2951 def revert(ui, repo, ctx, parents, *pats, **opts):
2954 def revert(ui, repo, ctx, parents, *pats, **opts):
2952 parent, p2 = parents
2955 parent, p2 = parents
2953 node = ctx.node()
2956 node = ctx.node()
2954
2957
2955 mf = ctx.manifest()
2958 mf = ctx.manifest()
2956 if node == p2:
2959 if node == p2:
2957 parent = p2
2960 parent = p2
2958
2961
2959 # need all matching names in dirstate and manifest of target rev,
2962 # need all matching names in dirstate and manifest of target rev,
2960 # so have to walk both. do not print errors if files exist in one
2963 # so have to walk both. do not print errors if files exist in one
2961 # but not other. in both cases, filesets should be evaluated against
2964 # but not other. in both cases, filesets should be evaluated against
2962 # workingctx to get consistent result (issue4497). this means 'set:**'
2965 # workingctx to get consistent result (issue4497). this means 'set:**'
2963 # cannot be used to select missing files from target rev.
2966 # cannot be used to select missing files from target rev.
2964
2967
2965 # `names` is a mapping for all elements in working copy and target revision
2968 # `names` is a mapping for all elements in working copy and target revision
2966 # The mapping is in the form:
2969 # The mapping is in the form:
2967 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2970 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2968 names = {}
2971 names = {}
2969
2972
2970 with repo.wlock():
2973 with repo.wlock():
2971 ## filling of the `names` mapping
2974 ## filling of the `names` mapping
2972 # walk dirstate to fill `names`
2975 # walk dirstate to fill `names`
2973
2976
2974 interactive = opts.get('interactive', False)
2977 interactive = opts.get('interactive', False)
2975 wctx = repo[None]
2978 wctx = repo[None]
2976 m = scmutil.match(wctx, pats, opts)
2979 m = scmutil.match(wctx, pats, opts)
2977
2980
2978 # we'll need this later
2981 # we'll need this later
2979 targetsubs = sorted(s for s in wctx.substate if m(s))
2982 targetsubs = sorted(s for s in wctx.substate if m(s))
2980
2983
2981 if not m.always():
2984 if not m.always():
2982 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2985 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2983 names[abs] = m.rel(abs), m.exact(abs)
2986 names[abs] = m.rel(abs), m.exact(abs)
2984
2987
2985 # walk target manifest to fill `names`
2988 # walk target manifest to fill `names`
2986
2989
2987 def badfn(path, msg):
2990 def badfn(path, msg):
2988 if path in names:
2991 if path in names:
2989 return
2992 return
2990 if path in ctx.substate:
2993 if path in ctx.substate:
2991 return
2994 return
2992 path_ = path + '/'
2995 path_ = path + '/'
2993 for f in names:
2996 for f in names:
2994 if f.startswith(path_):
2997 if f.startswith(path_):
2995 return
2998 return
2996 ui.warn("%s: %s\n" % (m.rel(path), msg))
2999 ui.warn("%s: %s\n" % (m.rel(path), msg))
2997
3000
2998 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3001 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2999 if abs not in names:
3002 if abs not in names:
3000 names[abs] = m.rel(abs), m.exact(abs)
3003 names[abs] = m.rel(abs), m.exact(abs)
3001
3004
3002 # Find status of all file in `names`.
3005 # Find status of all file in `names`.
3003 m = scmutil.matchfiles(repo, names)
3006 m = scmutil.matchfiles(repo, names)
3004
3007
3005 changes = repo.status(node1=node, match=m,
3008 changes = repo.status(node1=node, match=m,
3006 unknown=True, ignored=True, clean=True)
3009 unknown=True, ignored=True, clean=True)
3007 else:
3010 else:
3008 changes = repo.status(node1=node, match=m)
3011 changes = repo.status(node1=node, match=m)
3009 for kind in changes:
3012 for kind in changes:
3010 for abs in kind:
3013 for abs in kind:
3011 names[abs] = m.rel(abs), m.exact(abs)
3014 names[abs] = m.rel(abs), m.exact(abs)
3012
3015
3013 m = scmutil.matchfiles(repo, names)
3016 m = scmutil.matchfiles(repo, names)
3014
3017
3015 modified = set(changes.modified)
3018 modified = set(changes.modified)
3016 added = set(changes.added)
3019 added = set(changes.added)
3017 removed = set(changes.removed)
3020 removed = set(changes.removed)
3018 _deleted = set(changes.deleted)
3021 _deleted = set(changes.deleted)
3019 unknown = set(changes.unknown)
3022 unknown = set(changes.unknown)
3020 unknown.update(changes.ignored)
3023 unknown.update(changes.ignored)
3021 clean = set(changes.clean)
3024 clean = set(changes.clean)
3022 modadded = set()
3025 modadded = set()
3023
3026
3024 # split between files known in target manifest and the others
3027 # split between files known in target manifest and the others
3025 smf = set(mf)
3028 smf = set(mf)
3026
3029
3027 # determine the exact nature of the deleted changesets
3030 # determine the exact nature of the deleted changesets
3028 deladded = _deleted - smf
3031 deladded = _deleted - smf
3029 deleted = _deleted - deladded
3032 deleted = _deleted - deladded
3030
3033
3031 # We need to account for the state of the file in the dirstate,
3034 # We need to account for the state of the file in the dirstate,
3032 # even when we revert against something else than parent. This will
3035 # even when we revert against something else than parent. This will
3033 # slightly alter the behavior of revert (doing back up or not, delete
3036 # slightly alter the behavior of revert (doing back up or not, delete
3034 # or just forget etc).
3037 # or just forget etc).
3035 if parent == node:
3038 if parent == node:
3036 dsmodified = modified
3039 dsmodified = modified
3037 dsadded = added
3040 dsadded = added
3038 dsremoved = removed
3041 dsremoved = removed
3039 # store all local modifications, useful later for rename detection
3042 # store all local modifications, useful later for rename detection
3040 localchanges = dsmodified | dsadded
3043 localchanges = dsmodified | dsadded
3041 modified, added, removed = set(), set(), set()
3044 modified, added, removed = set(), set(), set()
3042 else:
3045 else:
3043 changes = repo.status(node1=parent, match=m)
3046 changes = repo.status(node1=parent, match=m)
3044 dsmodified = set(changes.modified)
3047 dsmodified = set(changes.modified)
3045 dsadded = set(changes.added)
3048 dsadded = set(changes.added)
3046 dsremoved = set(changes.removed)
3049 dsremoved = set(changes.removed)
3047 # store all local modifications, useful later for rename detection
3050 # store all local modifications, useful later for rename detection
3048 localchanges = dsmodified | dsadded
3051 localchanges = dsmodified | dsadded
3049
3052
3050 # only take into account for removes between wc and target
3053 # only take into account for removes between wc and target
3051 clean |= dsremoved - removed
3054 clean |= dsremoved - removed
3052 dsremoved &= removed
3055 dsremoved &= removed
3053 # distinct between dirstate remove and other
3056 # distinct between dirstate remove and other
3054 removed -= dsremoved
3057 removed -= dsremoved
3055
3058
3056 modadded = added & dsmodified
3059 modadded = added & dsmodified
3057 added -= modadded
3060 added -= modadded
3058
3061
3059 # tell newly modified apart.
3062 # tell newly modified apart.
3060 dsmodified &= modified
3063 dsmodified &= modified
3061 dsmodified |= modified & dsadded # dirstate added may need backup
3064 dsmodified |= modified & dsadded # dirstate added may need backup
3062 modified -= dsmodified
3065 modified -= dsmodified
3063
3066
3064 # We need to wait for some post-processing to update this set
3067 # We need to wait for some post-processing to update this set
3065 # before making the distinction. The dirstate will be used for
3068 # before making the distinction. The dirstate will be used for
3066 # that purpose.
3069 # that purpose.
3067 dsadded = added
3070 dsadded = added
3068
3071
3069 # in case of merge, files that are actually added can be reported as
3072 # in case of merge, files that are actually added can be reported as
3070 # modified, we need to post process the result
3073 # modified, we need to post process the result
3071 if p2 != nullid:
3074 if p2 != nullid:
3072 mergeadd = dsmodified - smf
3075 mergeadd = dsmodified - smf
3073 dsadded |= mergeadd
3076 dsadded |= mergeadd
3074 dsmodified -= mergeadd
3077 dsmodified -= mergeadd
3075
3078
3076 # if f is a rename, update `names` to also revert the source
3079 # if f is a rename, update `names` to also revert the source
3077 cwd = repo.getcwd()
3080 cwd = repo.getcwd()
3078 for f in localchanges:
3081 for f in localchanges:
3079 src = repo.dirstate.copied(f)
3082 src = repo.dirstate.copied(f)
3080 # XXX should we check for rename down to target node?
3083 # XXX should we check for rename down to target node?
3081 if src and src not in names and repo.dirstate[src] == 'r':
3084 if src and src not in names and repo.dirstate[src] == 'r':
3082 dsremoved.add(src)
3085 dsremoved.add(src)
3083 names[src] = (repo.pathto(src, cwd), True)
3086 names[src] = (repo.pathto(src, cwd), True)
3084
3087
3085 # distinguish between file to forget and the other
3088 # distinguish between file to forget and the other
3086 added = set()
3089 added = set()
3087 for abs in dsadded:
3090 for abs in dsadded:
3088 if repo.dirstate[abs] != 'a':
3091 if repo.dirstate[abs] != 'a':
3089 added.add(abs)
3092 added.add(abs)
3090 dsadded -= added
3093 dsadded -= added
3091
3094
3092 for abs in deladded:
3095 for abs in deladded:
3093 if repo.dirstate[abs] == 'a':
3096 if repo.dirstate[abs] == 'a':
3094 dsadded.add(abs)
3097 dsadded.add(abs)
3095 deladded -= dsadded
3098 deladded -= dsadded
3096
3099
3097 # For files marked as removed, we check if an unknown file is present at
3100 # For files marked as removed, we check if an unknown file is present at
3098 # the same path. If a such file exists it may need to be backed up.
3101 # the same path. If a such file exists it may need to be backed up.
3099 # Making the distinction at this stage helps have simpler backup
3102 # Making the distinction at this stage helps have simpler backup
3100 # logic.
3103 # logic.
3101 removunk = set()
3104 removunk = set()
3102 for abs in removed:
3105 for abs in removed:
3103 target = repo.wjoin(abs)
3106 target = repo.wjoin(abs)
3104 if os.path.lexists(target):
3107 if os.path.lexists(target):
3105 removunk.add(abs)
3108 removunk.add(abs)
3106 removed -= removunk
3109 removed -= removunk
3107
3110
3108 dsremovunk = set()
3111 dsremovunk = set()
3109 for abs in dsremoved:
3112 for abs in dsremoved:
3110 target = repo.wjoin(abs)
3113 target = repo.wjoin(abs)
3111 if os.path.lexists(target):
3114 if os.path.lexists(target):
3112 dsremovunk.add(abs)
3115 dsremovunk.add(abs)
3113 dsremoved -= dsremovunk
3116 dsremoved -= dsremovunk
3114
3117
3115 # action to be actually performed by revert
3118 # action to be actually performed by revert
3116 # (<list of file>, message>) tuple
3119 # (<list of file>, message>) tuple
3117 actions = {'revert': ([], _('reverting %s\n')),
3120 actions = {'revert': ([], _('reverting %s\n')),
3118 'add': ([], _('adding %s\n')),
3121 'add': ([], _('adding %s\n')),
3119 'remove': ([], _('removing %s\n')),
3122 'remove': ([], _('removing %s\n')),
3120 'drop': ([], _('removing %s\n')),
3123 'drop': ([], _('removing %s\n')),
3121 'forget': ([], _('forgetting %s\n')),
3124 'forget': ([], _('forgetting %s\n')),
3122 'undelete': ([], _('undeleting %s\n')),
3125 'undelete': ([], _('undeleting %s\n')),
3123 'noop': (None, _('no changes needed to %s\n')),
3126 'noop': (None, _('no changes needed to %s\n')),
3124 'unknown': (None, _('file not managed: %s\n')),
3127 'unknown': (None, _('file not managed: %s\n')),
3125 }
3128 }
3126
3129
3127 # "constant" that convey the backup strategy.
3130 # "constant" that convey the backup strategy.
3128 # All set to `discard` if `no-backup` is set do avoid checking
3131 # All set to `discard` if `no-backup` is set do avoid checking
3129 # no_backup lower in the code.
3132 # no_backup lower in the code.
3130 # These values are ordered for comparison purposes
3133 # These values are ordered for comparison purposes
3131 backupinteractive = 3 # do backup if interactively modified
3134 backupinteractive = 3 # do backup if interactively modified
3132 backup = 2 # unconditionally do backup
3135 backup = 2 # unconditionally do backup
3133 check = 1 # check if the existing file differs from target
3136 check = 1 # check if the existing file differs from target
3134 discard = 0 # never do backup
3137 discard = 0 # never do backup
3135 if opts.get('no_backup'):
3138 if opts.get('no_backup'):
3136 backupinteractive = backup = check = discard
3139 backupinteractive = backup = check = discard
3137 if interactive:
3140 if interactive:
3138 dsmodifiedbackup = backupinteractive
3141 dsmodifiedbackup = backupinteractive
3139 else:
3142 else:
3140 dsmodifiedbackup = backup
3143 dsmodifiedbackup = backup
3141 tobackup = set()
3144 tobackup = set()
3142
3145
3143 backupanddel = actions['remove']
3146 backupanddel = actions['remove']
3144 if not opts.get('no_backup'):
3147 if not opts.get('no_backup'):
3145 backupanddel = actions['drop']
3148 backupanddel = actions['drop']
3146
3149
3147 disptable = (
3150 disptable = (
3148 # dispatch table:
3151 # dispatch table:
3149 # file state
3152 # file state
3150 # action
3153 # action
3151 # make backup
3154 # make backup
3152
3155
3153 ## Sets that results that will change file on disk
3156 ## Sets that results that will change file on disk
3154 # Modified compared to target, no local change
3157 # Modified compared to target, no local change
3155 (modified, actions['revert'], discard),
3158 (modified, actions['revert'], discard),
3156 # Modified compared to target, but local file is deleted
3159 # Modified compared to target, but local file is deleted
3157 (deleted, actions['revert'], discard),
3160 (deleted, actions['revert'], discard),
3158 # Modified compared to target, local change
3161 # Modified compared to target, local change
3159 (dsmodified, actions['revert'], dsmodifiedbackup),
3162 (dsmodified, actions['revert'], dsmodifiedbackup),
3160 # Added since target
3163 # Added since target
3161 (added, actions['remove'], discard),
3164 (added, actions['remove'], discard),
3162 # Added in working directory
3165 # Added in working directory
3163 (dsadded, actions['forget'], discard),
3166 (dsadded, actions['forget'], discard),
3164 # Added since target, have local modification
3167 # Added since target, have local modification
3165 (modadded, backupanddel, backup),
3168 (modadded, backupanddel, backup),
3166 # Added since target but file is missing in working directory
3169 # Added since target but file is missing in working directory
3167 (deladded, actions['drop'], discard),
3170 (deladded, actions['drop'], discard),
3168 # Removed since target, before working copy parent
3171 # Removed since target, before working copy parent
3169 (removed, actions['add'], discard),
3172 (removed, actions['add'], discard),
3170 # Same as `removed` but an unknown file exists at the same path
3173 # Same as `removed` but an unknown file exists at the same path
3171 (removunk, actions['add'], check),
3174 (removunk, actions['add'], check),
3172 # Removed since targe, marked as such in working copy parent
3175 # Removed since targe, marked as such in working copy parent
3173 (dsremoved, actions['undelete'], discard),
3176 (dsremoved, actions['undelete'], discard),
3174 # Same as `dsremoved` but an unknown file exists at the same path
3177 # Same as `dsremoved` but an unknown file exists at the same path
3175 (dsremovunk, actions['undelete'], check),
3178 (dsremovunk, actions['undelete'], check),
3176 ## the following sets does not result in any file changes
3179 ## the following sets does not result in any file changes
3177 # File with no modification
3180 # File with no modification
3178 (clean, actions['noop'], discard),
3181 (clean, actions['noop'], discard),
3179 # Existing file, not tracked anywhere
3182 # Existing file, not tracked anywhere
3180 (unknown, actions['unknown'], discard),
3183 (unknown, actions['unknown'], discard),
3181 )
3184 )
3182
3185
3183 for abs, (rel, exact) in sorted(names.items()):
3186 for abs, (rel, exact) in sorted(names.items()):
3184 # target file to be touch on disk (relative to cwd)
3187 # target file to be touch on disk (relative to cwd)
3185 target = repo.wjoin(abs)
3188 target = repo.wjoin(abs)
3186 # search the entry in the dispatch table.
3189 # search the entry in the dispatch table.
3187 # if the file is in any of these sets, it was touched in the working
3190 # if the file is in any of these sets, it was touched in the working
3188 # directory parent and we are sure it needs to be reverted.
3191 # directory parent and we are sure it needs to be reverted.
3189 for table, (xlist, msg), dobackup in disptable:
3192 for table, (xlist, msg), dobackup in disptable:
3190 if abs not in table:
3193 if abs not in table:
3191 continue
3194 continue
3192 if xlist is not None:
3195 if xlist is not None:
3193 xlist.append(abs)
3196 xlist.append(abs)
3194 if dobackup:
3197 if dobackup:
3195 # If in interactive mode, don't automatically create
3198 # If in interactive mode, don't automatically create
3196 # .orig files (issue4793)
3199 # .orig files (issue4793)
3197 if dobackup == backupinteractive:
3200 if dobackup == backupinteractive:
3198 tobackup.add(abs)
3201 tobackup.add(abs)
3199 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3202 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3200 bakname = scmutil.origpath(ui, repo, rel)
3203 bakname = scmutil.origpath(ui, repo, rel)
3201 ui.note(_('saving current version of %s as %s\n') %
3204 ui.note(_('saving current version of %s as %s\n') %
3202 (rel, bakname))
3205 (rel, bakname))
3203 if not opts.get('dry_run'):
3206 if not opts.get('dry_run'):
3204 if interactive:
3207 if interactive:
3205 util.copyfile(target, bakname)
3208 util.copyfile(target, bakname)
3206 else:
3209 else:
3207 util.rename(target, bakname)
3210 util.rename(target, bakname)
3208 if ui.verbose or not exact:
3211 if ui.verbose or not exact:
3209 if not isinstance(msg, basestring):
3212 if not isinstance(msg, basestring):
3210 msg = msg(abs)
3213 msg = msg(abs)
3211 ui.status(msg % rel)
3214 ui.status(msg % rel)
3212 elif exact:
3215 elif exact:
3213 ui.warn(msg % rel)
3216 ui.warn(msg % rel)
3214 break
3217 break
3215
3218
3216 if not opts.get('dry_run'):
3219 if not opts.get('dry_run'):
3217 needdata = ('revert', 'add', 'undelete')
3220 needdata = ('revert', 'add', 'undelete')
3218 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3221 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3219 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3222 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3220
3223
3221 if targetsubs:
3224 if targetsubs:
3222 # Revert the subrepos on the revert list
3225 # Revert the subrepos on the revert list
3223 for sub in targetsubs:
3226 for sub in targetsubs:
3224 try:
3227 try:
3225 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3228 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3226 except KeyError:
3229 except KeyError:
3227 raise error.Abort("subrepository '%s' does not exist in %s!"
3230 raise error.Abort("subrepository '%s' does not exist in %s!"
3228 % (sub, short(ctx.node())))
3231 % (sub, short(ctx.node())))
3229
3232
3230 def _revertprefetch(repo, ctx, *files):
3233 def _revertprefetch(repo, ctx, *files):
3231 """Let extension changing the storage layer prefetch content"""
3234 """Let extension changing the storage layer prefetch content"""
3232 pass
3235 pass
3233
3236
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    repo        - the local repository
    parents     - (parent, p2) nodes of the working directory
    ctx         - the context being reverted to
    actions     - dict mapping action name -> (file list, message), as built
                  by the caller's dispatch table
    interactive - prompt the user per hunk instead of reverting wholesale
    tobackup    - set of files that need an interactive-mode backup copy
    """
    parent, p2 = parents
    node = ctx.node()
    excluded_files = []
    # files the user declines to forget below are added to this exclude list
    # so the interactive matcher skips them
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write the target revision's content for f into the working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        if interactive:
            choice = \
                repo.ui.promptchoice(
                    _("forget added file %s (yn)?$$ &Yes $$ &No")
                    % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # the file may already be gone from disk; still update the
            # dirstate below
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        reversehunks = repo.ui.configbool('experimental',
                                          'revertalternateinteractivemode',
                                          True)
        # in 'reversehunks' mode the diff goes from target to working copy
        # and the selected hunks are reversed before being applied
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)
        operation = 'discard' if node == parent else 'revert'

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except patch.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                # only back each file up once, for its first selected hunk
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # restore copy/rename metadata recorded between parent and ctx
    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3360
3363
def command(table):
    """Build a decorator factory that registers commands into ``table``.

    ``table`` must be a dict. The returned ``cmd`` callable accepts the
    command name, an iterable of option tuples (see
    ``mercurial.fancyopts.fancyopts()`` for the format of each tuple), an
    optional one-line synopsis shown in the help output, and three
    repository-related flags:

    - norepo: the command does not require a local repository. Most
      commands operate against a repository, thus the default is False.
    - optionalrepo: the command optionally requires a local repository.
    - inferrepo: try to find a repository from the command line arguments.
      If True, arguments will be examined for potential repository
      locations. See ``findrepo()``. If a repository is found, it will be
      used.

    Each decorated function is stored in ``table`` as ``(func, options)``
    or ``(func, options, synopsis)``, and the three flags are attached to
    the function as attributes.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # the dispatch machinery reads these attributes off the function
            func.norepo = norepo
            func.optionalrepo = optionalrepo
            func.inferrepo = inferrepo
            entry = (func, list(options))
            if synopsis:
                entry += (synopsis,)
            table[name] = entry
            return func
        return decorator

    return cmd
3405
3408
def checkunresolved(ms):
    """Abort if mergestate ``ms`` still holds conflicts.

    Two situations are fatal: files the user has not resolved yet, and a
    merge driver that either has not finished ('s' state) or still has
    driver-resolved files pending.
    """
    leftover = list(ms.unresolved())
    if leftover:
        raise error.Abort(_("unresolved merge conflicts "
                            "(see 'hg help resolve')"))
    mdclean = ms.mdstate() == 's'
    if not mdclean or list(ms.driverresolved()):
        raise error.Abort(_('driver-resolved merge conflicts'),
                          hint=_('run "hg resolve --all" to resolve'))
3413
3416
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3431
3434
# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a tuple of:
# (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished() and clearunfinished() below.
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3442
3445
def checkunfinished(repo, commit=False):
    '''Abort if a multistep operation, like graft, is still in progress.

    It's probably good to check this right before bailifchanged(). With
    commit=True, states that explicitly allow committing are skipped.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        skippable = commit and allowcommit
        if not skippable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3453
3456
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.

    A non-clearable state aborts before any state file is deleted.
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # nothing blocked us: drop every clearable state file that exists
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
3464
3467
3465 afterresolvedstates = [
3468 afterresolvedstates = [
3466 ('graftstate',
3469 ('graftstate',
3467 _('hg graft --continue')),
3470 _('hg graft --continue')),
3468 ]
3471 ]
3469
3472
3470 def howtocontinue(repo):
3473 def howtocontinue(repo):
3471 '''Check for an unfinished operation and return the command to finish
3474 '''Check for an unfinished operation and return the command to finish
3472 it.
3475 it.
3473
3476
3474 afterresolvedstates tuples define a .hg/{file} and the corresponding
3477 afterresolvedstates tuples define a .hg/{file} and the corresponding
3475 command needed to finish it.
3478 command needed to finish it.
3476
3479
3477 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3480 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3478 a boolean.
3481 a boolean.
3479 '''
3482 '''
3480 contmsg = _("continue: %s")
3483 contmsg = _("continue: %s")
3481 for f, msg in afterresolvedstates:
3484 for f, msg in afterresolvedstates:
3482 if repo.vfs.exists(f):
3485 if repo.vfs.exists(f):
3483 return contmsg % msg, True
3486 return contmsg % msg, True
3484 workingctx = repo[None]
3487 workingctx = repo[None]
3485 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3488 dirty = any(repo.status()) or any(workingctx.sub(s).dirty()
3486 for s in workingctx.substate)
3489 for s in workingctx.substate)
3487 if dirty:
3490 if dirty:
3488 return contmsg % _("hg commit"), False
3491 return contmsg % _("hg commit"), False
3489 return None, None
3492 return None, None
3490
3493
3491 def checkafterresolved(repo):
3494 def checkafterresolved(repo):
3492 '''Inform the user about the next action after completing hg resolve
3495 '''Inform the user about the next action after completing hg resolve
3493
3496
3494 If there's a matching afterresolvedstates, howtocontinue will yield
3497 If there's a matching afterresolvedstates, howtocontinue will yield
3495 repo.ui.warn as the reporter.
3498 repo.ui.warn as the reporter.
3496
3499
3497 Otherwise, it will yield repo.ui.note.
3500 Otherwise, it will yield repo.ui.note.
3498 '''
3501 '''
3499 msg, warning = howtocontinue(repo)
3502 msg, warning = howtocontinue(repo)
3500 if msg is not None:
3503 if msg is not None:
3501 if warning:
3504 if warning:
3502 repo.ui.warn("%s\n" % msg)
3505 repo.ui.warn("%s\n" % msg)
3503 else:
3506 else:
3504 repo.ui.note("%s\n" % msg)
3507 repo.ui.note("%s\n" % msg)
3505
3508
3506 def wrongtooltocontinue(repo, task):
3509 def wrongtooltocontinue(repo, task):
3507 '''Raise an abort suggesting how to properly continue if there is an
3510 '''Raise an abort suggesting how to properly continue if there is an
3508 active task.
3511 active task.
3509
3512
3510 Uses howtocontinue() to find the active task.
3513 Uses howtocontinue() to find the active task.
3511
3514
3512 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3515 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3513 a hint.
3516 a hint.
3514 '''
3517 '''
3515 after = howtocontinue(repo)
3518 after = howtocontinue(repo)
3516 hint = None
3519 hint = None
3517 if after[1]:
3520 if after[1]:
3518 hint = after[0]
3521 hint = after[0]
3519 raise error.Abort(_('no %s in progress') % task, hint=hint)
3522 raise error.Abort(_('no %s in progress') % task, hint=hint)
3520
3523
3521 class dirstateguard(object):
3524 class dirstateguard(object):
3522 '''Restore dirstate at unexpected failure.
3525 '''Restore dirstate at unexpected failure.
3523
3526
3524 At the construction, this class does:
3527 At the construction, this class does:
3525
3528
3526 - write current ``repo.dirstate`` out, and
3529 - write current ``repo.dirstate`` out, and
3527 - save ``.hg/dirstate`` into the backup file
3530 - save ``.hg/dirstate`` into the backup file
3528
3531
3529 This restores ``.hg/dirstate`` from backup file, if ``release()``
3532 This restores ``.hg/dirstate`` from backup file, if ``release()``
3530 is invoked before ``close()``.
3533 is invoked before ``close()``.
3531
3534
3532 This just removes the backup file at ``close()`` before ``release()``.
3535 This just removes the backup file at ``close()`` before ``release()``.
3533 '''
3536 '''
3534
3537
3535 def __init__(self, repo, name):
3538 def __init__(self, repo, name):
3536 self._repo = repo
3539 self._repo = repo
3537 self._active = False
3540 self._active = False
3538 self._closed = False
3541 self._closed = False
3539 self._suffix = '.backup.%s.%d' % (name, id(self))
3542 self._suffix = '.backup.%s.%d' % (name, id(self))
3540 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3543 repo.dirstate.savebackup(repo.currenttransaction(), self._suffix)
3541 self._active = True
3544 self._active = True
3542
3545
3543 def __del__(self):
3546 def __del__(self):
3544 if self._active: # still active
3547 if self._active: # still active
3545 # this may occur, even if this class is used correctly:
3548 # this may occur, even if this class is used correctly:
3546 # for example, releasing other resources like transaction
3549 # for example, releasing other resources like transaction
3547 # may raise exception before ``dirstateguard.release`` in
3550 # may raise exception before ``dirstateguard.release`` in
3548 # ``release(tr, ....)``.
3551 # ``release(tr, ....)``.
3549 self._abort()
3552 self._abort()
3550
3553
3551 def close(self):
3554 def close(self):
3552 if not self._active: # already inactivated
3555 if not self._active: # already inactivated
3553 msg = (_("can't close already inactivated backup: dirstate%s")
3556 msg = (_("can't close already inactivated backup: dirstate%s")
3554 % self._suffix)
3557 % self._suffix)
3555 raise error.Abort(msg)
3558 raise error.Abort(msg)
3556
3559
3557 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3560 self._repo.dirstate.clearbackup(self._repo.currenttransaction(),
3558 self._suffix)
3561 self._suffix)
3559 self._active = False
3562 self._active = False
3560 self._closed = True
3563 self._closed = True
3561
3564
3562 def _abort(self):
3565 def _abort(self):
3563 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3566 self._repo.dirstate.restorebackup(self._repo.currenttransaction(),
3564 self._suffix)
3567 self._suffix)
3565 self._active = False
3568 self._active = False
3566
3569
3567 def release(self):
3570 def release(self):
3568 if not self._closed:
3571 if not self._closed:
3569 if not self._active: # already inactivated
3572 if not self._active: # already inactivated
3570 msg = (_("can't release already inactivated backup:"
3573 msg = (_("can't release already inactivated backup:"
3571 " dirstate%s")
3574 " dirstate%s")
3572 % self._suffix)
3575 % self._suffix)
3573 raise error.Abort(msg)
3576 raise error.Abort(msg)
3574 self._abort()
3577 self._abort()
@@ -1,1982 +1,1984 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import os
11 import os
12 import re
12 import re
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 bin,
17 bin,
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 wdirid,
22 wdirid,
23 )
23 )
24 from . import (
24 from . import (
25 encoding,
25 encoding,
26 error,
26 error,
27 fileset,
27 fileset,
28 match as matchmod,
28 match as matchmod,
29 mdiff,
29 mdiff,
30 obsolete as obsmod,
30 obsolete as obsmod,
31 patch,
31 patch,
32 phases,
32 phases,
33 repoview,
33 repoview,
34 revlog,
34 revlog,
35 scmutil,
35 scmutil,
36 subrepo,
36 subrepo,
37 util,
37 util,
38 )
38 )
39
39
40 propertycache = util.propertycache
40 propertycache = util.propertycache
41
41
42 # Phony node value to stand-in for new files in some uses of
42 # Phony node value to stand-in for new files in some uses of
43 # manifests. Manifests support 21-byte hashes for nodes which are
43 # manifests. Manifests support 21-byte hashes for nodes which are
44 # dirty in the working copy.
44 # dirty in the working copy.
45 _newnode = '!' * 21
45 _newnode = '!' * 21
46
46
47 nonascii = re.compile(r'[^\x21-\x7f]').search
47 nonascii = re.compile(r'[^\x21-\x7f]').search
48
48
49 class basectx(object):
49 class basectx(object):
50 """A basectx object represents the common logic for its children:
50 """A basectx object represents the common logic for its children:
51 changectx: read-only context that is already present in the repo,
51 changectx: read-only context that is already present in the repo,
52 workingctx: a context that represents the working directory and can
52 workingctx: a context that represents the working directory and can
53 be committed,
53 be committed,
54 memctx: a context that represents changes in-memory and can also
54 memctx: a context that represents changes in-memory and can also
55 be committed."""
55 be committed."""
56 def __new__(cls, repo, changeid='', *args, **kwargs):
56 def __new__(cls, repo, changeid='', *args, **kwargs):
57 if isinstance(changeid, basectx):
57 if isinstance(changeid, basectx):
58 return changeid
58 return changeid
59
59
60 o = super(basectx, cls).__new__(cls)
60 o = super(basectx, cls).__new__(cls)
61
61
62 o._repo = repo
62 o._repo = repo
63 o._rev = nullrev
63 o._rev = nullrev
64 o._node = nullid
64 o._node = nullid
65
65
66 return o
66 return o
67
67
68 def __str__(self):
68 def __str__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 def __int__(self):
71 def __int__(self):
72 return self.rev()
72 return self.rev()
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _manifestmatches(self, match, s):
95 def _manifestmatches(self, match, s):
96 """generate a new manifest filtered by the match argument
96 """generate a new manifest filtered by the match argument
97
97
98 This method is for internal use only and mainly exists to provide an
98 This method is for internal use only and mainly exists to provide an
99 object oriented way for other contexts to customize the manifest
99 object oriented way for other contexts to customize the manifest
100 generation.
100 generation.
101 """
101 """
102 return self.manifest().matches(match)
102 return self.manifest().matches(match)
103
103
104 def _matchstatus(self, other, match):
104 def _matchstatus(self, other, match):
105 """return match.always if match is none
105 """return match.always if match is none
106
106
107 This internal method provides a way for child objects to override the
107 This internal method provides a way for child objects to override the
108 match operator.
108 match operator.
109 """
109 """
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
111
111
112 def _buildstatus(self, other, s, match, listignored, listclean,
112 def _buildstatus(self, other, s, match, listignored, listclean,
113 listunknown):
113 listunknown):
114 """build a status with respect to another context"""
114 """build a status with respect to another context"""
115 # Load earliest manifest first for caching reasons. More specifically,
115 # Load earliest manifest first for caching reasons. More specifically,
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 # 1000 and cache it so that when you read 1001, we just need to apply a
119 # delta to what's in the cache. So that's one full reconstruction + one
119 # delta to what's in the cache. So that's one full reconstruction + one
120 # delta application.
120 # delta application.
121 if self.rev() is not None and self.rev() < other.rev():
121 if self.rev() is not None and self.rev() < other.rev():
122 self.manifest()
122 self.manifest()
123 mf1 = other._manifestmatches(match, s)
123 mf1 = other._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
124 mf2 = self._manifestmatches(match, s)
125
125
126 modified, added = [], []
126 modified, added = [], []
127 removed = []
127 removed = []
128 clean = []
128 clean = []
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
129 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
130 deletedset = set(deleted)
130 deletedset = set(deleted)
131 d = mf1.diff(mf2, clean=listclean)
131 d = mf1.diff(mf2, clean=listclean)
132 for fn, value in d.iteritems():
132 for fn, value in d.iteritems():
133 if fn in deletedset:
133 if fn in deletedset:
134 continue
134 continue
135 if value is None:
135 if value is None:
136 clean.append(fn)
136 clean.append(fn)
137 continue
137 continue
138 (node1, flag1), (node2, flag2) = value
138 (node1, flag1), (node2, flag2) = value
139 if node1 is None:
139 if node1 is None:
140 added.append(fn)
140 added.append(fn)
141 elif node2 is None:
141 elif node2 is None:
142 removed.append(fn)
142 removed.append(fn)
143 elif flag1 != flag2:
143 elif flag1 != flag2:
144 modified.append(fn)
144 modified.append(fn)
145 elif node2 != _newnode:
145 elif node2 != _newnode:
146 # When comparing files between two commits, we save time by
146 # When comparing files between two commits, we save time by
147 # not comparing the file contents when the nodeids differ.
147 # not comparing the file contents when the nodeids differ.
148 # Note that this means we incorrectly report a reverted change
148 # Note that this means we incorrectly report a reverted change
149 # to a file as a modification.
149 # to a file as a modification.
150 modified.append(fn)
150 modified.append(fn)
151 elif self[fn].cmp(other[fn]):
151 elif self[fn].cmp(other[fn]):
152 modified.append(fn)
152 modified.append(fn)
153 else:
153 else:
154 clean.append(fn)
154 clean.append(fn)
155
155
156 if removed:
156 if removed:
157 # need to filter files if they are already reported as removed
157 # need to filter files if they are already reported as removed
158 unknown = [fn for fn in unknown if fn not in mf1]
158 unknown = [fn for fn in unknown if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
159 ignored = [fn for fn in ignored if fn not in mf1]
160 # if they're deleted, don't report them as removed
160 # if they're deleted, don't report them as removed
161 removed = [fn for fn in removed if fn not in deletedset]
161 removed = [fn for fn in removed if fn not in deletedset]
162
162
163 return scmutil.status(modified, added, removed, deleted, unknown,
163 return scmutil.status(modified, added, removed, deleted, unknown,
164 ignored, clean)
164 ignored, clean)
165
165
166 @propertycache
166 @propertycache
167 def substate(self):
167 def substate(self):
168 return subrepo.state(self, self._repo.ui)
168 return subrepo.state(self, self._repo.ui)
169
169
170 def subrev(self, subpath):
170 def subrev(self, subpath):
171 return self.substate[subpath][1]
171 return self.substate[subpath][1]
172
172
173 def rev(self):
173 def rev(self):
174 return self._rev
174 return self._rev
175 def node(self):
175 def node(self):
176 return self._node
176 return self._node
177 def hex(self):
177 def hex(self):
178 return hex(self.node())
178 return hex(self.node())
179 def manifest(self):
179 def manifest(self):
180 return self._manifest
180 return self._manifest
181 def repo(self):
181 def repo(self):
182 return self._repo
182 return self._repo
183 def phasestr(self):
183 def phasestr(self):
184 return phases.phasenames[self.phase()]
184 return phases.phasenames[self.phase()]
185 def mutable(self):
185 def mutable(self):
186 return self.phase() > phases.public
186 return self.phase() > phases.public
187
187
188 def getfileset(self, expr):
188 def getfileset(self, expr):
189 return fileset.getfileset(self, expr)
189 return fileset.getfileset(self, expr)
190
190
191 def obsolete(self):
191 def obsolete(self):
192 """True if the changeset is obsolete"""
192 """True if the changeset is obsolete"""
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
193 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
194
194
195 def extinct(self):
195 def extinct(self):
196 """True if the changeset is extinct"""
196 """True if the changeset is extinct"""
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
197 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
198
198
199 def unstable(self):
199 def unstable(self):
200 """True if the changeset is not obsolete but it's ancestor are"""
200 """True if the changeset is not obsolete but it's ancestor are"""
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
201 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
202
202
203 def bumped(self):
203 def bumped(self):
204 """True if the changeset try to be a successor of a public changeset
204 """True if the changeset try to be a successor of a public changeset
205
205
206 Only non-public and non-obsolete changesets may be bumped.
206 Only non-public and non-obsolete changesets may be bumped.
207 """
207 """
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
208 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
209
209
210 def divergent(self):
210 def divergent(self):
211 """Is a successors of a changeset with multiple possible successors set
211 """Is a successors of a changeset with multiple possible successors set
212
212
213 Only non-public and non-obsolete changesets may be divergent.
213 Only non-public and non-obsolete changesets may be divergent.
214 """
214 """
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
215 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
216
216
217 def troubled(self):
217 def troubled(self):
218 """True if the changeset is either unstable, bumped or divergent"""
218 """True if the changeset is either unstable, bumped or divergent"""
219 return self.unstable() or self.bumped() or self.divergent()
219 return self.unstable() or self.bumped() or self.divergent()
220
220
221 def troubles(self):
221 def troubles(self):
222 """return the list of troubles affecting this changesets.
222 """return the list of troubles affecting this changesets.
223
223
224 Troubles are returned as strings. possible values are:
224 Troubles are returned as strings. possible values are:
225 - unstable,
225 - unstable,
226 - bumped,
226 - bumped,
227 - divergent.
227 - divergent.
228 """
228 """
229 troubles = []
229 troubles = []
230 if self.unstable():
230 if self.unstable():
231 troubles.append('unstable')
231 troubles.append('unstable')
232 if self.bumped():
232 if self.bumped():
233 troubles.append('bumped')
233 troubles.append('bumped')
234 if self.divergent():
234 if self.divergent():
235 troubles.append('divergent')
235 troubles.append('divergent')
236 return troubles
236 return troubles
237
237
238 def parents(self):
238 def parents(self):
239 """return contexts for each parent changeset"""
239 """return contexts for each parent changeset"""
240 return self._parents
240 return self._parents
241
241
242 def p1(self):
242 def p1(self):
243 return self._parents[0]
243 return self._parents[0]
244
244
245 def p2(self):
245 def p2(self):
246 parents = self._parents
246 parents = self._parents
247 if len(parents) == 2:
247 if len(parents) == 2:
248 return parents[1]
248 return parents[1]
249 return changectx(self._repo, nullrev)
249 return changectx(self._repo, nullrev)
250
250
251 def _fileinfo(self, path):
251 def _fileinfo(self, path):
252 if '_manifest' in self.__dict__:
252 if '_manifest' in self.__dict__:
253 try:
253 try:
254 return self._manifest[path], self._manifest.flags(path)
254 return self._manifest[path], self._manifest.flags(path)
255 except KeyError:
255 except KeyError:
256 raise error.ManifestLookupError(self._node, path,
256 raise error.ManifestLookupError(self._node, path,
257 _('not found in manifest'))
257 _('not found in manifest'))
258 if '_manifestdelta' in self.__dict__ or path in self.files():
258 if '_manifestdelta' in self.__dict__ or path in self.files():
259 if path in self._manifestdelta:
259 if path in self._manifestdelta:
260 return (self._manifestdelta[path],
260 return (self._manifestdelta[path],
261 self._manifestdelta.flags(path))
261 self._manifestdelta.flags(path))
262 node, flag = self._repo.manifest.find(self._changeset.manifest, path)
262 mfl = self._repo.manifestlog
263 if not node:
263 try:
264 node, flag = mfl[self._changeset.manifest].find(path)
265 except KeyError:
264 raise error.ManifestLookupError(self._node, path,
266 raise error.ManifestLookupError(self._node, path,
265 _('not found in manifest'))
267 _('not found in manifest'))
266
268
267 return node, flag
269 return node, flag
268
270
269 def filenode(self, path):
271 def filenode(self, path):
270 return self._fileinfo(path)[0]
272 return self._fileinfo(path)[0]
271
273
272 def flags(self, path):
274 def flags(self, path):
273 try:
275 try:
274 return self._fileinfo(path)[1]
276 return self._fileinfo(path)[1]
275 except error.LookupError:
277 except error.LookupError:
276 return ''
278 return ''
277
279
278 def sub(self, path, allowcreate=True):
280 def sub(self, path, allowcreate=True):
279 '''return a subrepo for the stored revision of path, never wdir()'''
281 '''return a subrepo for the stored revision of path, never wdir()'''
280 return subrepo.subrepo(self, path, allowcreate=allowcreate)
282 return subrepo.subrepo(self, path, allowcreate=allowcreate)
281
283
282 def nullsub(self, path, pctx):
284 def nullsub(self, path, pctx):
283 return subrepo.nullsubrepo(self, path, pctx)
285 return subrepo.nullsubrepo(self, path, pctx)
284
286
285 def workingsub(self, path):
287 def workingsub(self, path):
286 '''return a subrepo for the stored revision, or wdir if this is a wdir
288 '''return a subrepo for the stored revision, or wdir if this is a wdir
287 context.
289 context.
288 '''
290 '''
289 return subrepo.subrepo(self, path, allowwdir=True)
291 return subrepo.subrepo(self, path, allowwdir=True)
290
292
291 def match(self, pats=[], include=None, exclude=None, default='glob',
293 def match(self, pats=[], include=None, exclude=None, default='glob',
292 listsubrepos=False, badfn=None):
294 listsubrepos=False, badfn=None):
293 r = self._repo
295 r = self._repo
294 return matchmod.match(r.root, r.getcwd(), pats,
296 return matchmod.match(r.root, r.getcwd(), pats,
295 include, exclude, default,
297 include, exclude, default,
296 auditor=r.nofsauditor, ctx=self,
298 auditor=r.nofsauditor, ctx=self,
297 listsubrepos=listsubrepos, badfn=badfn)
299 listsubrepos=listsubrepos, badfn=badfn)
298
300
299 def diff(self, ctx2=None, match=None, **opts):
301 def diff(self, ctx2=None, match=None, **opts):
300 """Returns a diff generator for the given contexts and matcher"""
302 """Returns a diff generator for the given contexts and matcher"""
301 if ctx2 is None:
303 if ctx2 is None:
302 ctx2 = self.p1()
304 ctx2 = self.p1()
303 if ctx2 is not None:
305 if ctx2 is not None:
304 ctx2 = self._repo[ctx2]
306 ctx2 = self._repo[ctx2]
305 diffopts = patch.diffopts(self._repo.ui, opts)
307 diffopts = patch.diffopts(self._repo.ui, opts)
306 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
308 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
307
309
308 def dirs(self):
310 def dirs(self):
309 return self._manifest.dirs()
311 return self._manifest.dirs()
310
312
311 def hasdir(self, dir):
313 def hasdir(self, dir):
312 return self._manifest.hasdir(dir)
314 return self._manifest.hasdir(dir)
313
315
314 def dirty(self, missing=False, merge=True, branch=True):
316 def dirty(self, missing=False, merge=True, branch=True):
315 return False
317 return False
316
318
317 def status(self, other=None, match=None, listignored=False,
319 def status(self, other=None, match=None, listignored=False,
318 listclean=False, listunknown=False, listsubrepos=False):
320 listclean=False, listunknown=False, listsubrepos=False):
319 """return status of files between two nodes or node and working
321 """return status of files between two nodes or node and working
320 directory.
322 directory.
321
323
322 If other is None, compare this node with working directory.
324 If other is None, compare this node with working directory.
323
325
324 returns (modified, added, removed, deleted, unknown, ignored, clean)
326 returns (modified, added, removed, deleted, unknown, ignored, clean)
325 """
327 """
326
328
327 ctx1 = self
329 ctx1 = self
328 ctx2 = self._repo[other]
330 ctx2 = self._repo[other]
329
331
330 # This next code block is, admittedly, fragile logic that tests for
332 # This next code block is, admittedly, fragile logic that tests for
331 # reversing the contexts and wouldn't need to exist if it weren't for
333 # reversing the contexts and wouldn't need to exist if it weren't for
332 # the fast (and common) code path of comparing the working directory
334 # the fast (and common) code path of comparing the working directory
333 # with its first parent.
335 # with its first parent.
334 #
336 #
335 # What we're aiming for here is the ability to call:
337 # What we're aiming for here is the ability to call:
336 #
338 #
337 # workingctx.status(parentctx)
339 # workingctx.status(parentctx)
338 #
340 #
339 # If we always built the manifest for each context and compared those,
341 # If we always built the manifest for each context and compared those,
340 # then we'd be done. But the special case of the above call means we
342 # then we'd be done. But the special case of the above call means we
341 # just copy the manifest of the parent.
343 # just copy the manifest of the parent.
342 reversed = False
344 reversed = False
343 if (not isinstance(ctx1, changectx)
345 if (not isinstance(ctx1, changectx)
344 and isinstance(ctx2, changectx)):
346 and isinstance(ctx2, changectx)):
345 reversed = True
347 reversed = True
346 ctx1, ctx2 = ctx2, ctx1
348 ctx1, ctx2 = ctx2, ctx1
347
349
348 match = ctx2._matchstatus(ctx1, match)
350 match = ctx2._matchstatus(ctx1, match)
349 r = scmutil.status([], [], [], [], [], [], [])
351 r = scmutil.status([], [], [], [], [], [], [])
350 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
352 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
351 listunknown)
353 listunknown)
352
354
353 if reversed:
355 if reversed:
354 # Reverse added and removed. Clear deleted, unknown and ignored as
356 # Reverse added and removed. Clear deleted, unknown and ignored as
355 # these make no sense to reverse.
357 # these make no sense to reverse.
356 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
358 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
357 r.clean)
359 r.clean)
358
360
359 if listsubrepos:
361 if listsubrepos:
360 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
362 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
361 try:
363 try:
362 rev2 = ctx2.subrev(subpath)
364 rev2 = ctx2.subrev(subpath)
363 except KeyError:
365 except KeyError:
364 # A subrepo that existed in node1 was deleted between
366 # A subrepo that existed in node1 was deleted between
365 # node1 and node2 (inclusive). Thus, ctx2's substate
367 # node1 and node2 (inclusive). Thus, ctx2's substate
366 # won't contain that subpath. The best we can do ignore it.
368 # won't contain that subpath. The best we can do ignore it.
367 rev2 = None
369 rev2 = None
368 submatch = matchmod.subdirmatcher(subpath, match)
370 submatch = matchmod.subdirmatcher(subpath, match)
369 s = sub.status(rev2, match=submatch, ignored=listignored,
371 s = sub.status(rev2, match=submatch, ignored=listignored,
370 clean=listclean, unknown=listunknown,
372 clean=listclean, unknown=listunknown,
371 listsubrepos=True)
373 listsubrepos=True)
372 for rfiles, sfiles in zip(r, s):
374 for rfiles, sfiles in zip(r, s):
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
375 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
374
376
375 for l in r:
377 for l in r:
376 l.sort()
378 l.sort()
377
379
378 return r
380 return r
379
381
380
382
381 def makememctx(repo, parents, text, user, date, branch, files, store,
383 def makememctx(repo, parents, text, user, date, branch, files, store,
382 editor=None, extra=None):
384 editor=None, extra=None):
383 def getfilectx(repo, memctx, path):
385 def getfilectx(repo, memctx, path):
384 data, mode, copied = store.getfile(path)
386 data, mode, copied = store.getfile(path)
385 if data is None:
387 if data is None:
386 return None
388 return None
387 islink, isexec = mode
389 islink, isexec = mode
388 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
390 return memfilectx(repo, path, data, islink=islink, isexec=isexec,
389 copied=copied, memctx=memctx)
391 copied=copied, memctx=memctx)
390 if extra is None:
392 if extra is None:
391 extra = {}
393 extra = {}
392 if branch:
394 if branch:
393 extra['branch'] = encoding.fromlocal(branch)
395 extra['branch'] = encoding.fromlocal(branch)
394 ctx = memctx(repo, parents, text, files, getfilectx, user,
396 ctx = memctx(repo, parents, text, files, getfilectx, user,
395 date, extra, editor)
397 date, extra, editor)
396 return ctx
398 return ctx
397
399
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # basectx.__new__ already copied an existing context object for us,
        # so there is nothing left to initialize here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            # fast paths for the common identifier forms, tried in order
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # looks like a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # maybe a decimal revision number (possibly negative)
            try:
                r = int(changeid)
                if str(r) != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # maybe a full 40-char hex node id
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # finally, try a unique hex prefix match
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # present binary junk readably in the final error message
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            if repo.filtername.startswith('visible'):
                msg = _("hidden revision '%s'") % changeid
                hint = _('use --hidden to access hidden revisions')
                raise error.FilteredRepoLookupError(msg, hint=hint)
            msg = _("filtered revision '%s' (not in '%s' subset)")
            msg %= (changeid, repo.filtername)
            raise error.FilteredRepoLookupError(msg)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._repo.manifestlog[self._changeset.manifest].read()

    @propertycache
    def _manifestdelta(self):
        mfnode = self._changeset.manifest
        return self._repo.manifestlog[mfnode].readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        first, second = repo.changelog.parentrevs(self._rev)
        ps = [changectx(repo, first)]
        if second != nullrev:
            ps.append(changectx(repo, second))
        return ps

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        kids = self._repo.changelog.children(self._node)
        return [changectx(self._repo, n) for n in kids]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)

    def descendant(self, other):
        """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)
661
660 class basefilectx(object):
662 class basefilectx(object):
661 """A filecontext object represents the common logic for its children:
663 """A filecontext object represents the common logic for its children:
662 filectx: read-only access to a filerevision that is already present
664 filectx: read-only access to a filerevision that is already present
663 in the repo,
665 in the repo,
664 workingfilectx: a filecontext that represents files from the working
666 workingfilectx: a filecontext that represents files from the working
665 directory,
667 directory,
666 memfilectx: a filecontext that represents files in-memory."""
668 memfilectx: a filecontext that represents files in-memory."""
667 def __new__(cls, repo, path, *args, **kwargs):
669 def __new__(cls, repo, path, *args, **kwargs):
668 return super(basefilectx, cls).__new__(cls)
670 return super(basefilectx, cls).__new__(cls)
669
671
670 @propertycache
672 @propertycache
671 def _filelog(self):
673 def _filelog(self):
672 return self._repo.file(self._path)
674 return self._repo.file(self._path)
673
675
674 @propertycache
676 @propertycache
675 def _changeid(self):
677 def _changeid(self):
676 if '_changeid' in self.__dict__:
678 if '_changeid' in self.__dict__:
677 return self._changeid
679 return self._changeid
678 elif '_changectx' in self.__dict__:
680 elif '_changectx' in self.__dict__:
679 return self._changectx.rev()
681 return self._changectx.rev()
680 elif '_descendantrev' in self.__dict__:
682 elif '_descendantrev' in self.__dict__:
681 # this file context was created from a revision with a known
683 # this file context was created from a revision with a known
682 # descendant, we can (lazily) correct for linkrev aliases
684 # descendant, we can (lazily) correct for linkrev aliases
683 return self._adjustlinkrev(self._descendantrev)
685 return self._adjustlinkrev(self._descendantrev)
684 else:
686 else:
685 return self._filelog.linkrev(self._filerev)
687 return self._filelog.linkrev(self._filerev)
686
688
687 @propertycache
689 @propertycache
688 def _filenode(self):
690 def _filenode(self):
689 if '_fileid' in self.__dict__:
691 if '_fileid' in self.__dict__:
690 return self._filelog.lookup(self._fileid)
692 return self._filelog.lookup(self._fileid)
691 else:
693 else:
692 return self._changectx.filenode(self._path)
694 return self._changectx.filenode(self._path)
693
695
694 @propertycache
696 @propertycache
695 def _filerev(self):
697 def _filerev(self):
696 return self._filelog.rev(self._filenode)
698 return self._filelog.rev(self._filenode)
697
699
698 @propertycache
700 @propertycache
699 def _repopath(self):
701 def _repopath(self):
700 return self._path
702 return self._path
701
703
702 def __nonzero__(self):
704 def __nonzero__(self):
703 try:
705 try:
704 self._filenode
706 self._filenode
705 return True
707 return True
706 except error.LookupError:
708 except error.LookupError:
707 # file is missing
709 # file is missing
708 return False
710 return False
709
711
710 def __str__(self):
712 def __str__(self):
711 try:
713 try:
712 return "%s@%s" % (self.path(), self._changectx)
714 return "%s@%s" % (self.path(), self._changectx)
713 except error.LookupError:
715 except error.LookupError:
714 return "%s@???" % self.path()
716 return "%s@???" % self.path()
715
717
716 def __repr__(self):
718 def __repr__(self):
717 return "<%s %s>" % (type(self).__name__, str(self))
719 return "<%s %s>" % (type(self).__name__, str(self))
718
720
719 def __hash__(self):
721 def __hash__(self):
720 try:
722 try:
721 return hash((self._path, self._filenode))
723 return hash((self._path, self._filenode))
722 except AttributeError:
724 except AttributeError:
723 return id(self)
725 return id(self)
724
726
725 def __eq__(self, other):
727 def __eq__(self, other):
726 try:
728 try:
727 return (type(self) == type(other) and self._path == other._path
729 return (type(self) == type(other) and self._path == other._path
728 and self._filenode == other._filenode)
730 and self._filenode == other._filenode)
729 except AttributeError:
731 except AttributeError:
730 return False
732 return False
731
733
732 def __ne__(self, other):
734 def __ne__(self, other):
733 return not (self == other)
735 return not (self == other)
734
736
735 def filerev(self):
737 def filerev(self):
736 return self._filerev
738 return self._filerev
737 def filenode(self):
739 def filenode(self):
738 return self._filenode
740 return self._filenode
739 def flags(self):
741 def flags(self):
740 return self._changectx.flags(self._path)
742 return self._changectx.flags(self._path)
741 def filelog(self):
743 def filelog(self):
742 return self._filelog
744 return self._filelog
743 def rev(self):
745 def rev(self):
744 return self._changeid
746 return self._changeid
745 def linkrev(self):
747 def linkrev(self):
746 return self._filelog.linkrev(self._filerev)
748 return self._filelog.linkrev(self._filerev)
747 def node(self):
749 def node(self):
748 return self._changectx.node()
750 return self._changectx.node()
749 def hex(self):
751 def hex(self):
750 return self._changectx.hex()
752 return self._changectx.hex()
751 def user(self):
753 def user(self):
752 return self._changectx.user()
754 return self._changectx.user()
753 def date(self):
755 def date(self):
754 return self._changectx.date()
756 return self._changectx.date()
755 def files(self):
757 def files(self):
756 return self._changectx.files()
758 return self._changectx.files()
757 def description(self):
759 def description(self):
758 return self._changectx.description()
760 return self._changectx.description()
759 def branch(self):
761 def branch(self):
760 return self._changectx.branch()
762 return self._changectx.branch()
761 def extra(self):
763 def extra(self):
762 return self._changectx.extra()
764 return self._changectx.extra()
763 def phase(self):
765 def phase(self):
764 return self._changectx.phase()
766 return self._changectx.phase()
765 def phasestr(self):
767 def phasestr(self):
766 return self._changectx.phasestr()
768 return self._changectx.phasestr()
767 def manifest(self):
769 def manifest(self):
768 return self._changectx.manifest()
770 return self._changectx.manifest()
769 def changectx(self):
771 def changectx(self):
770 return self._changectx
772 return self._changectx
771 def repo(self):
773 def repo(self):
772 return self._repo
774 return self._repo
773
775
774 def path(self):
776 def path(self):
775 return self._path
777 return self._path
776
778
777 def isbinary(self):
779 def isbinary(self):
778 try:
780 try:
779 return util.binary(self.data())
781 return util.binary(self.data())
780 except IOError:
782 except IOError:
781 return False
783 return False
782 def isexec(self):
784 def isexec(self):
783 return 'x' in self.flags()
785 return 'x' in self.flags()
784 def islink(self):
786 def islink(self):
785 return 'l' in self.flags()
787 return 'l' in self.flags()
786
788
787 def isabsent(self):
789 def isabsent(self):
788 """whether this filectx represents a file not in self._changectx
790 """whether this filectx represents a file not in self._changectx
789
791
790 This is mainly for merge code to detect change/delete conflicts. This is
792 This is mainly for merge code to detect change/delete conflicts. This is
791 expected to be True for all subclasses of basectx."""
793 expected to be True for all subclasses of basectx."""
792 return False
794 return False
793
795
794 _customcmp = False
796 _customcmp = False
795 def cmp(self, fctx):
797 def cmp(self, fctx):
796 """compare with other file context
798 """compare with other file context
797
799
798 returns True if different than fctx.
800 returns True if different than fctx.
799 """
801 """
800 if fctx._customcmp:
802 if fctx._customcmp:
801 return fctx.cmp(self)
803 return fctx.cmp(self)
802
804
803 if (fctx._filenode is None
805 if (fctx._filenode is None
804 and (self._repo._encodefilterpats
806 and (self._repo._encodefilterpats
805 # if file data starts with '\1\n', empty metadata block is
807 # if file data starts with '\1\n', empty metadata block is
806 # prepended, which adds 4 bytes to filelog.size().
808 # prepended, which adds 4 bytes to filelog.size().
807 or self.size() - 4 == fctx.size())
809 or self.size() - 4 == fctx.size())
808 or self.size() == fctx.size()):
810 or self.size() == fctx.size()):
809 return self._filelog.cmp(self._filenode, fctx.data())
811 return self._filelog.cmp(self._filenode, fctx.data())
810
812
811 return True
813 return True
812
814
813 def _adjustlinkrev(self, srcrev, inclusive=False):
815 def _adjustlinkrev(self, srcrev, inclusive=False):
814 """return the first ancestor of <srcrev> introducing <fnode>
816 """return the first ancestor of <srcrev> introducing <fnode>
815
817
816 If the linkrev of the file revision does not point to an ancestor of
818 If the linkrev of the file revision does not point to an ancestor of
817 srcrev, we'll walk down the ancestors until we find one introducing
819 srcrev, we'll walk down the ancestors until we find one introducing
818 this file revision.
820 this file revision.
819
821
820 :srcrev: the changeset revision we search ancestors from
822 :srcrev: the changeset revision we search ancestors from
821 :inclusive: if true, the src revision will also be checked
823 :inclusive: if true, the src revision will also be checked
822 """
824 """
823 repo = self._repo
825 repo = self._repo
824 cl = repo.unfiltered().changelog
826 cl = repo.unfiltered().changelog
825 mfl = repo.manifestlog
827 mfl = repo.manifestlog
826 # fetch the linkrev
828 # fetch the linkrev
827 lkr = self.linkrev()
829 lkr = self.linkrev()
828 # hack to reuse ancestor computation when searching for renames
830 # hack to reuse ancestor computation when searching for renames
829 memberanc = getattr(self, '_ancestrycontext', None)
831 memberanc = getattr(self, '_ancestrycontext', None)
830 iteranc = None
832 iteranc = None
831 if srcrev is None:
833 if srcrev is None:
832 # wctx case, used by workingfilectx during mergecopy
834 # wctx case, used by workingfilectx during mergecopy
833 revs = [p.rev() for p in self._repo[None].parents()]
835 revs = [p.rev() for p in self._repo[None].parents()]
834 inclusive = True # we skipped the real (revless) source
836 inclusive = True # we skipped the real (revless) source
835 else:
837 else:
836 revs = [srcrev]
838 revs = [srcrev]
837 if memberanc is None:
839 if memberanc is None:
838 memberanc = iteranc = cl.ancestors(revs, lkr,
840 memberanc = iteranc = cl.ancestors(revs, lkr,
839 inclusive=inclusive)
841 inclusive=inclusive)
840 # check if this linkrev is an ancestor of srcrev
842 # check if this linkrev is an ancestor of srcrev
841 if lkr not in memberanc:
843 if lkr not in memberanc:
842 if iteranc is None:
844 if iteranc is None:
843 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
845 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
844 fnode = self._filenode
846 fnode = self._filenode
845 path = self._path
847 path = self._path
846 for a in iteranc:
848 for a in iteranc:
847 ac = cl.read(a) # get changeset data (we avoid object creation)
849 ac = cl.read(a) # get changeset data (we avoid object creation)
848 if path in ac[3]: # checking the 'files' field.
850 if path in ac[3]: # checking the 'files' field.
849 # The file has been touched, check if the content is
851 # The file has been touched, check if the content is
850 # similar to the one we search for.
852 # similar to the one we search for.
851 if fnode == mfl[ac[0]].readfast().get(path):
853 if fnode == mfl[ac[0]].readfast().get(path):
852 return a
854 return a
853 # In theory, we should never get out of that loop without a result.
855 # In theory, we should never get out of that loop without a result.
854 # But if manifest uses a buggy file revision (not children of the
856 # But if manifest uses a buggy file revision (not children of the
855 # one it replaces) we could. Such a buggy situation will likely
857 # one it replaces) we could. Such a buggy situation will likely
856 # result is crash somewhere else at to some point.
858 # result is crash somewhere else at to some point.
857 return lkr
859 return lkr
858
860
859 def introrev(self):
861 def introrev(self):
860 """return the rev of the changeset which introduced this file revision
862 """return the rev of the changeset which introduced this file revision
861
863
862 This method is different from linkrev because it take into account the
864 This method is different from linkrev because it take into account the
863 changeset the filectx was created from. It ensures the returned
865 changeset the filectx was created from. It ensures the returned
864 revision is one of its ancestors. This prevents bugs from
866 revision is one of its ancestors. This prevents bugs from
865 'linkrev-shadowing' when a file revision is used by multiple
867 'linkrev-shadowing' when a file revision is used by multiple
866 changesets.
868 changesets.
867 """
869 """
868 lkr = self.linkrev()
870 lkr = self.linkrev()
869 attrs = vars(self)
871 attrs = vars(self)
870 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
872 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
871 if noctx or self.rev() == lkr:
873 if noctx or self.rev() == lkr:
872 return self.linkrev()
874 return self.linkrev()
873 return self._adjustlinkrev(self.rev(), inclusive=True)
875 return self._adjustlinkrev(self.rev(), inclusive=True)
874
876
875 def _parentfilectx(self, path, fileid, filelog):
877 def _parentfilectx(self, path, fileid, filelog):
876 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
878 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
877 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
879 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
878 if '_changeid' in vars(self) or '_changectx' in vars(self):
880 if '_changeid' in vars(self) or '_changectx' in vars(self):
879 # If self is associated with a changeset (probably explicitly
881 # If self is associated with a changeset (probably explicitly
880 # fed), ensure the created filectx is associated with a
882 # fed), ensure the created filectx is associated with a
881 # changeset that is an ancestor of self.changectx.
883 # changeset that is an ancestor of self.changectx.
882 # This lets us later use _adjustlinkrev to get a correct link.
884 # This lets us later use _adjustlinkrev to get a correct link.
883 fctx._descendantrev = self.rev()
885 fctx._descendantrev = self.rev()
884 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
886 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
885 elif '_descendantrev' in vars(self):
887 elif '_descendantrev' in vars(self):
886 # Otherwise propagate _descendantrev if we have one associated.
888 # Otherwise propagate _descendantrev if we have one associated.
887 fctx._descendantrev = self._descendantrev
889 fctx._descendantrev = self._descendantrev
888 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
890 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
889 return fctx
891 return fctx
890
892
891 def parents(self):
893 def parents(self):
892 _path = self._path
894 _path = self._path
893 fl = self._filelog
895 fl = self._filelog
894 parents = self._filelog.parents(self._filenode)
896 parents = self._filelog.parents(self._filenode)
895 pl = [(_path, node, fl) for node in parents if node != nullid]
897 pl = [(_path, node, fl) for node in parents if node != nullid]
896
898
897 r = fl.renamed(self._filenode)
899 r = fl.renamed(self._filenode)
898 if r:
900 if r:
899 # - In the simple rename case, both parent are nullid, pl is empty.
901 # - In the simple rename case, both parent are nullid, pl is empty.
900 # - In case of merge, only one of the parent is null id and should
902 # - In case of merge, only one of the parent is null id and should
901 # be replaced with the rename information. This parent is -always-
903 # be replaced with the rename information. This parent is -always-
902 # the first one.
904 # the first one.
903 #
905 #
904 # As null id have always been filtered out in the previous list
906 # As null id have always been filtered out in the previous list
905 # comprehension, inserting to 0 will always result in "replacing
907 # comprehension, inserting to 0 will always result in "replacing
906 # first nullid parent with rename information.
908 # first nullid parent with rename information.
907 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
909 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
908
910
909 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
911 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
910
912
911 def p1(self):
913 def p1(self):
912 return self.parents()[0]
914 return self.parents()[0]
913
915
914 def p2(self):
916 def p2(self):
915 p = self.parents()
917 p = self.parents()
916 if len(p) == 2:
918 if len(p) == 2:
917 return p[1]
919 return p[1]
918 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
920 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
919
921
920 def annotate(self, follow=False, linenumber=False, diffopts=None):
922 def annotate(self, follow=False, linenumber=False, diffopts=None):
921 '''returns a list of tuples of ((ctx, number), line) for each line
923 '''returns a list of tuples of ((ctx, number), line) for each line
922 in the file, where ctx is the filectx of the node where
924 in the file, where ctx is the filectx of the node where
923 that line was last changed; if linenumber parameter is true, number is
925 that line was last changed; if linenumber parameter is true, number is
924 the line number at the first appearance in the managed file, otherwise,
926 the line number at the first appearance in the managed file, otherwise,
925 number has a fixed value of False.
927 number has a fixed value of False.
926 '''
928 '''
927
929
928 def lines(text):
930 def lines(text):
929 if text.endswith("\n"):
931 if text.endswith("\n"):
930 return text.count("\n")
932 return text.count("\n")
931 return text.count("\n") + int(bool(text))
933 return text.count("\n") + int(bool(text))
932
934
933 if linenumber:
935 if linenumber:
934 def decorate(text, rev):
936 def decorate(text, rev):
935 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
937 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
936 else:
938 else:
937 def decorate(text, rev):
939 def decorate(text, rev):
938 return ([(rev, False)] * lines(text), text)
940 return ([(rev, False)] * lines(text), text)
939
941
940 def pair(parent, child):
942 def pair(parent, child):
941 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
943 blocks = mdiff.allblocks(parent[1], child[1], opts=diffopts)
942 for (a1, a2, b1, b2), t in blocks:
944 for (a1, a2, b1, b2), t in blocks:
943 # Changed blocks ('!') or blocks made only of blank lines ('~')
945 # Changed blocks ('!') or blocks made only of blank lines ('~')
944 # belong to the child.
946 # belong to the child.
945 if t == '=':
947 if t == '=':
946 child[0][b1:b2] = parent[0][a1:a2]
948 child[0][b1:b2] = parent[0][a1:a2]
947 return child
949 return child
948
950
949 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
951 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
950
952
951 def parents(f):
953 def parents(f):
952 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
954 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
953 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
955 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
954 # from the topmost introrev (= srcrev) down to p.linkrev() if it
956 # from the topmost introrev (= srcrev) down to p.linkrev() if it
955 # isn't an ancestor of the srcrev.
957 # isn't an ancestor of the srcrev.
956 f._changeid
958 f._changeid
957 pl = f.parents()
959 pl = f.parents()
958
960
959 # Don't return renamed parents if we aren't following.
961 # Don't return renamed parents if we aren't following.
960 if not follow:
962 if not follow:
961 pl = [p for p in pl if p.path() == f.path()]
963 pl = [p for p in pl if p.path() == f.path()]
962
964
963 # renamed filectx won't have a filelog yet, so set it
965 # renamed filectx won't have a filelog yet, so set it
964 # from the cache to save time
966 # from the cache to save time
965 for p in pl:
967 for p in pl:
966 if not '_filelog' in p.__dict__:
968 if not '_filelog' in p.__dict__:
967 p._filelog = getlog(p.path())
969 p._filelog = getlog(p.path())
968
970
969 return pl
971 return pl
970
972
971 # use linkrev to find the first changeset where self appeared
973 # use linkrev to find the first changeset where self appeared
972 base = self
974 base = self
973 introrev = self.introrev()
975 introrev = self.introrev()
974 if self.rev() != introrev:
976 if self.rev() != introrev:
975 base = self.filectx(self.filenode(), changeid=introrev)
977 base = self.filectx(self.filenode(), changeid=introrev)
976 if getattr(base, '_ancestrycontext', None) is None:
978 if getattr(base, '_ancestrycontext', None) is None:
977 cl = self._repo.changelog
979 cl = self._repo.changelog
978 if introrev is None:
980 if introrev is None:
979 # wctx is not inclusive, but works because _ancestrycontext
981 # wctx is not inclusive, but works because _ancestrycontext
980 # is used to test filelog revisions
982 # is used to test filelog revisions
981 ac = cl.ancestors([p.rev() for p in base.parents()],
983 ac = cl.ancestors([p.rev() for p in base.parents()],
982 inclusive=True)
984 inclusive=True)
983 else:
985 else:
984 ac = cl.ancestors([introrev], inclusive=True)
986 ac = cl.ancestors([introrev], inclusive=True)
985 base._ancestrycontext = ac
987 base._ancestrycontext = ac
986
988
987 # This algorithm would prefer to be recursive, but Python is a
989 # This algorithm would prefer to be recursive, but Python is a
988 # bit recursion-hostile. Instead we do an iterative
990 # bit recursion-hostile. Instead we do an iterative
989 # depth-first search.
991 # depth-first search.
990
992
991 # 1st DFS pre-calculates pcache and needed
993 # 1st DFS pre-calculates pcache and needed
992 visit = [base]
994 visit = [base]
993 pcache = {}
995 pcache = {}
994 needed = {base: 1}
996 needed = {base: 1}
995 while visit:
997 while visit:
996 f = visit.pop()
998 f = visit.pop()
997 if f in pcache:
999 if f in pcache:
998 continue
1000 continue
999 pl = parents(f)
1001 pl = parents(f)
1000 pcache[f] = pl
1002 pcache[f] = pl
1001 for p in pl:
1003 for p in pl:
1002 needed[p] = needed.get(p, 0) + 1
1004 needed[p] = needed.get(p, 0) + 1
1003 if p not in pcache:
1005 if p not in pcache:
1004 visit.append(p)
1006 visit.append(p)
1005
1007
1006 # 2nd DFS does the actual annotate
1008 # 2nd DFS does the actual annotate
1007 visit[:] = [base]
1009 visit[:] = [base]
1008 hist = {}
1010 hist = {}
1009 while visit:
1011 while visit:
1010 f = visit[-1]
1012 f = visit[-1]
1011 if f in hist:
1013 if f in hist:
1012 visit.pop()
1014 visit.pop()
1013 continue
1015 continue
1014
1016
1015 ready = True
1017 ready = True
1016 pl = pcache[f]
1018 pl = pcache[f]
1017 for p in pl:
1019 for p in pl:
1018 if p not in hist:
1020 if p not in hist:
1019 ready = False
1021 ready = False
1020 visit.append(p)
1022 visit.append(p)
1021 if ready:
1023 if ready:
1022 visit.pop()
1024 visit.pop()
1023 curr = decorate(f.data(), f)
1025 curr = decorate(f.data(), f)
1024 for p in pl:
1026 for p in pl:
1025 curr = pair(hist[p], curr)
1027 curr = pair(hist[p], curr)
1026 if needed[p] == 1:
1028 if needed[p] == 1:
1027 del hist[p]
1029 del hist[p]
1028 del needed[p]
1030 del needed[p]
1029 else:
1031 else:
1030 needed[p] -= 1
1032 needed[p] -= 1
1031
1033
1032 hist[f] = curr
1034 hist[f] = curr
1033 del pcache[f]
1035 del pcache[f]
1034
1036
1035 return zip(hist[base][0], hist[base][1].splitlines(True))
1037 return zip(hist[base][0], hist[base][1].splitlines(True))
1036
1038
1037 def ancestors(self, followfirst=False):
1039 def ancestors(self, followfirst=False):
1038 visit = {}
1040 visit = {}
1039 c = self
1041 c = self
1040 if followfirst:
1042 if followfirst:
1041 cut = 1
1043 cut = 1
1042 else:
1044 else:
1043 cut = None
1045 cut = None
1044
1046
1045 while True:
1047 while True:
1046 for parent in c.parents()[:cut]:
1048 for parent in c.parents()[:cut]:
1047 visit[(parent.linkrev(), parent.filenode())] = parent
1049 visit[(parent.linkrev(), parent.filenode())] = parent
1048 if not visit:
1050 if not visit:
1049 break
1051 break
1050 c = visit.pop(max(visit))
1052 c = visit.pop(max(visit))
1051 yield c
1053 yield c
1052
1054
1053 class filectx(basefilectx):
1055 class filectx(basefilectx):
1054 """A filecontext object makes access to data related to a particular
1056 """A filecontext object makes access to data related to a particular
1055 filerevision convenient."""
1057 filerevision convenient."""
1056 def __init__(self, repo, path, changeid=None, fileid=None,
1058 def __init__(self, repo, path, changeid=None, fileid=None,
1057 filelog=None, changectx=None):
1059 filelog=None, changectx=None):
1058 """changeid can be a changeset revision, node, or tag.
1060 """changeid can be a changeset revision, node, or tag.
1059 fileid can be a file revision or node."""
1061 fileid can be a file revision or node."""
1060 self._repo = repo
1062 self._repo = repo
1061 self._path = path
1063 self._path = path
1062
1064
1063 assert (changeid is not None
1065 assert (changeid is not None
1064 or fileid is not None
1066 or fileid is not None
1065 or changectx is not None), \
1067 or changectx is not None), \
1066 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1068 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1067 % (changeid, fileid, changectx))
1069 % (changeid, fileid, changectx))
1068
1070
1069 if filelog is not None:
1071 if filelog is not None:
1070 self._filelog = filelog
1072 self._filelog = filelog
1071
1073
1072 if changeid is not None:
1074 if changeid is not None:
1073 self._changeid = changeid
1075 self._changeid = changeid
1074 if changectx is not None:
1076 if changectx is not None:
1075 self._changectx = changectx
1077 self._changectx = changectx
1076 if fileid is not None:
1078 if fileid is not None:
1077 self._fileid = fileid
1079 self._fileid = fileid
1078
1080
1079 @propertycache
1081 @propertycache
1080 def _changectx(self):
1082 def _changectx(self):
1081 try:
1083 try:
1082 return changectx(self._repo, self._changeid)
1084 return changectx(self._repo, self._changeid)
1083 except error.FilteredRepoLookupError:
1085 except error.FilteredRepoLookupError:
1084 # Linkrev may point to any revision in the repository. When the
1086 # Linkrev may point to any revision in the repository. When the
1085 # repository is filtered this may lead to `filectx` trying to build
1087 # repository is filtered this may lead to `filectx` trying to build
1086 # `changectx` for filtered revision. In such case we fallback to
1088 # `changectx` for filtered revision. In such case we fallback to
1087 # creating `changectx` on the unfiltered version of the reposition.
1089 # creating `changectx` on the unfiltered version of the reposition.
1088 # This fallback should not be an issue because `changectx` from
1090 # This fallback should not be an issue because `changectx` from
1089 # `filectx` are not used in complex operations that care about
1091 # `filectx` are not used in complex operations that care about
1090 # filtering.
1092 # filtering.
1091 #
1093 #
1092 # This fallback is a cheap and dirty fix that prevent several
1094 # This fallback is a cheap and dirty fix that prevent several
1093 # crashes. It does not ensure the behavior is correct. However the
1095 # crashes. It does not ensure the behavior is correct. However the
1094 # behavior was not correct before filtering either and "incorrect
1096 # behavior was not correct before filtering either and "incorrect
1095 # behavior" is seen as better as "crash"
1097 # behavior" is seen as better as "crash"
1096 #
1098 #
1097 # Linkrevs have several serious troubles with filtering that are
1099 # Linkrevs have several serious troubles with filtering that are
1098 # complicated to solve. Proper handling of the issue here should be
1100 # complicated to solve. Proper handling of the issue here should be
1099 # considered when solving linkrev issue are on the table.
1101 # considered when solving linkrev issue are on the table.
1100 return changectx(self._repo.unfiltered(), self._changeid)
1102 return changectx(self._repo.unfiltered(), self._changeid)
1101
1103
1102 def filectx(self, fileid, changeid=None):
1104 def filectx(self, fileid, changeid=None):
1103 '''opens an arbitrary revision of the file without
1105 '''opens an arbitrary revision of the file without
1104 opening a new filelog'''
1106 opening a new filelog'''
1105 return filectx(self._repo, self._path, fileid=fileid,
1107 return filectx(self._repo, self._path, fileid=fileid,
1106 filelog=self._filelog, changeid=changeid)
1108 filelog=self._filelog, changeid=changeid)
1107
1109
1108 def data(self):
1110 def data(self):
1109 try:
1111 try:
1110 return self._filelog.read(self._filenode)
1112 return self._filelog.read(self._filenode)
1111 except error.CensoredNodeError:
1113 except error.CensoredNodeError:
1112 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1114 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1113 return ""
1115 return ""
1114 raise error.Abort(_("censored node: %s") % short(self._filenode),
1116 raise error.Abort(_("censored node: %s") % short(self._filenode),
1115 hint=_("set censor.policy to ignore errors"))
1117 hint=_("set censor.policy to ignore errors"))
1116
1118
1117 def size(self):
1119 def size(self):
1118 return self._filelog.size(self._filerev)
1120 return self._filelog.size(self._filerev)
1119
1121
1120 def renamed(self):
1122 def renamed(self):
1121 """check if file was actually renamed in this changeset revision
1123 """check if file was actually renamed in this changeset revision
1122
1124
1123 If rename logged in file revision, we report copy for changeset only
1125 If rename logged in file revision, we report copy for changeset only
1124 if file revisions linkrev points back to the changeset in question
1126 if file revisions linkrev points back to the changeset in question
1125 or both changeset parents contain different file revisions.
1127 or both changeset parents contain different file revisions.
1126 """
1128 """
1127
1129
1128 renamed = self._filelog.renamed(self._filenode)
1130 renamed = self._filelog.renamed(self._filenode)
1129 if not renamed:
1131 if not renamed:
1130 return renamed
1132 return renamed
1131
1133
1132 if self.rev() == self.linkrev():
1134 if self.rev() == self.linkrev():
1133 return renamed
1135 return renamed
1134
1136
1135 name = self.path()
1137 name = self.path()
1136 fnode = self._filenode
1138 fnode = self._filenode
1137 for p in self._changectx.parents():
1139 for p in self._changectx.parents():
1138 try:
1140 try:
1139 if fnode == p.filenode(name):
1141 if fnode == p.filenode(name):
1140 return None
1142 return None
1141 except error.LookupError:
1143 except error.LookupError:
1142 pass
1144 pass
1143 return renamed
1145 return renamed
1144
1146
1145 def children(self):
1147 def children(self):
1146 # hard for renames
1148 # hard for renames
1147 c = self._filelog.children(self._filenode)
1149 c = self._filelog.children(self._filenode)
1148 return [filectx(self._repo, self._path, fileid=x,
1150 return [filectx(self._repo, self._path, fileid=x,
1149 filelog=self._filelog) for x in c]
1151 filelog=self._filelog) for x in c]
1150
1152
1151 class committablectx(basectx):
1153 class committablectx(basectx):
1152 """A committablectx object provides common functionality for a context that
1154 """A committablectx object provides common functionality for a context that
1153 wants the ability to commit, e.g. workingctx or memctx."""
1155 wants the ability to commit, e.g. workingctx or memctx."""
1154 def __init__(self, repo, text="", user=None, date=None, extra=None,
1156 def __init__(self, repo, text="", user=None, date=None, extra=None,
1155 changes=None):
1157 changes=None):
1156 self._repo = repo
1158 self._repo = repo
1157 self._rev = None
1159 self._rev = None
1158 self._node = None
1160 self._node = None
1159 self._text = text
1161 self._text = text
1160 if date:
1162 if date:
1161 self._date = util.parsedate(date)
1163 self._date = util.parsedate(date)
1162 if user:
1164 if user:
1163 self._user = user
1165 self._user = user
1164 if changes:
1166 if changes:
1165 self._status = changes
1167 self._status = changes
1166
1168
1167 self._extra = {}
1169 self._extra = {}
1168 if extra:
1170 if extra:
1169 self._extra = extra.copy()
1171 self._extra = extra.copy()
1170 if 'branch' not in self._extra:
1172 if 'branch' not in self._extra:
1171 try:
1173 try:
1172 branch = encoding.fromlocal(self._repo.dirstate.branch())
1174 branch = encoding.fromlocal(self._repo.dirstate.branch())
1173 except UnicodeDecodeError:
1175 except UnicodeDecodeError:
1174 raise error.Abort(_('branch name not in UTF-8!'))
1176 raise error.Abort(_('branch name not in UTF-8!'))
1175 self._extra['branch'] = branch
1177 self._extra['branch'] = branch
1176 if self._extra['branch'] == '':
1178 if self._extra['branch'] == '':
1177 self._extra['branch'] = 'default'
1179 self._extra['branch'] = 'default'
1178
1180
1179 def __str__(self):
1181 def __str__(self):
1180 return str(self._parents[0]) + "+"
1182 return str(self._parents[0]) + "+"
1181
1183
1182 def __nonzero__(self):
1184 def __nonzero__(self):
1183 return True
1185 return True
1184
1186
1185 def _buildflagfunc(self):
1187 def _buildflagfunc(self):
1186 # Create a fallback function for getting file flags when the
1188 # Create a fallback function for getting file flags when the
1187 # filesystem doesn't support them
1189 # filesystem doesn't support them
1188
1190
1189 copiesget = self._repo.dirstate.copies().get
1191 copiesget = self._repo.dirstate.copies().get
1190 parents = self.parents()
1192 parents = self.parents()
1191 if len(parents) < 2:
1193 if len(parents) < 2:
1192 # when we have one parent, it's easy: copy from parent
1194 # when we have one parent, it's easy: copy from parent
1193 man = parents[0].manifest()
1195 man = parents[0].manifest()
1194 def func(f):
1196 def func(f):
1195 f = copiesget(f, f)
1197 f = copiesget(f, f)
1196 return man.flags(f)
1198 return man.flags(f)
1197 else:
1199 else:
1198 # merges are tricky: we try to reconstruct the unstored
1200 # merges are tricky: we try to reconstruct the unstored
1199 # result from the merge (issue1802)
1201 # result from the merge (issue1802)
1200 p1, p2 = parents
1202 p1, p2 = parents
1201 pa = p1.ancestor(p2)
1203 pa = p1.ancestor(p2)
1202 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1204 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1203
1205
1204 def func(f):
1206 def func(f):
1205 f = copiesget(f, f) # may be wrong for merges with copies
1207 f = copiesget(f, f) # may be wrong for merges with copies
1206 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1208 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1207 if fl1 == fl2:
1209 if fl1 == fl2:
1208 return fl1
1210 return fl1
1209 if fl1 == fla:
1211 if fl1 == fla:
1210 return fl2
1212 return fl2
1211 if fl2 == fla:
1213 if fl2 == fla:
1212 return fl1
1214 return fl1
1213 return '' # punt for conflicts
1215 return '' # punt for conflicts
1214
1216
1215 return func
1217 return func
1216
1218
1217 @propertycache
1219 @propertycache
1218 def _flagfunc(self):
1220 def _flagfunc(self):
1219 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1221 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1220
1222
1221 @propertycache
1223 @propertycache
1222 def _manifest(self):
1224 def _manifest(self):
1223 """generate a manifest corresponding to the values in self._status
1225 """generate a manifest corresponding to the values in self._status
1224
1226
1225 This reuse the file nodeid from parent, but we append an extra letter
1227 This reuse the file nodeid from parent, but we append an extra letter
1226 when modified. Modified files get an extra 'm' while added files get
1228 when modified. Modified files get an extra 'm' while added files get
1227 an extra 'a'. This is used by manifests merge to see that files
1229 an extra 'a'. This is used by manifests merge to see that files
1228 are different and by update logic to avoid deleting newly added files.
1230 are different and by update logic to avoid deleting newly added files.
1229 """
1231 """
1230 parents = self.parents()
1232 parents = self.parents()
1231
1233
1232 man1 = parents[0].manifest()
1234 man1 = parents[0].manifest()
1233 man = man1.copy()
1235 man = man1.copy()
1234 if len(parents) > 1:
1236 if len(parents) > 1:
1235 man2 = self.p2().manifest()
1237 man2 = self.p2().manifest()
1236 def getman(f):
1238 def getman(f):
1237 if f in man1:
1239 if f in man1:
1238 return man1
1240 return man1
1239 return man2
1241 return man2
1240 else:
1242 else:
1241 getman = lambda f: man1
1243 getman = lambda f: man1
1242
1244
1243 copied = self._repo.dirstate.copies()
1245 copied = self._repo.dirstate.copies()
1244 ff = self._flagfunc
1246 ff = self._flagfunc
1245 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1247 for i, l in (("a", self._status.added), ("m", self._status.modified)):
1246 for f in l:
1248 for f in l:
1247 orig = copied.get(f, f)
1249 orig = copied.get(f, f)
1248 man[f] = getman(orig).get(orig, nullid) + i
1250 man[f] = getman(orig).get(orig, nullid) + i
1249 try:
1251 try:
1250 man.setflag(f, ff(f))
1252 man.setflag(f, ff(f))
1251 except OSError:
1253 except OSError:
1252 pass
1254 pass
1253
1255
1254 for f in self._status.deleted + self._status.removed:
1256 for f in self._status.deleted + self._status.removed:
1255 if f in man:
1257 if f in man:
1256 del man[f]
1258 del man[f]
1257
1259
1258 return man
1260 return man
1259
1261
1260 @propertycache
1262 @propertycache
1261 def _status(self):
1263 def _status(self):
1262 return self._repo.status()
1264 return self._repo.status()
1263
1265
1264 @propertycache
1266 @propertycache
1265 def _user(self):
1267 def _user(self):
1266 return self._repo.ui.username()
1268 return self._repo.ui.username()
1267
1269
1268 @propertycache
1270 @propertycache
1269 def _date(self):
1271 def _date(self):
1270 return util.makedate()
1272 return util.makedate()
1271
1273
1272 def subrev(self, subpath):
1274 def subrev(self, subpath):
1273 return None
1275 return None
1274
1276
1275 def manifestnode(self):
1277 def manifestnode(self):
1276 return None
1278 return None
1277 def user(self):
1279 def user(self):
1278 return self._user or self._repo.ui.username()
1280 return self._user or self._repo.ui.username()
1279 def date(self):
1281 def date(self):
1280 return self._date
1282 return self._date
1281 def description(self):
1283 def description(self):
1282 return self._text
1284 return self._text
1283 def files(self):
1285 def files(self):
1284 return sorted(self._status.modified + self._status.added +
1286 return sorted(self._status.modified + self._status.added +
1285 self._status.removed)
1287 self._status.removed)
1286
1288
1287 def modified(self):
1289 def modified(self):
1288 return self._status.modified
1290 return self._status.modified
1289 def added(self):
1291 def added(self):
1290 return self._status.added
1292 return self._status.added
1291 def removed(self):
1293 def removed(self):
1292 return self._status.removed
1294 return self._status.removed
1293 def deleted(self):
1295 def deleted(self):
1294 return self._status.deleted
1296 return self._status.deleted
1295 def branch(self):
1297 def branch(self):
1296 return encoding.tolocal(self._extra['branch'])
1298 return encoding.tolocal(self._extra['branch'])
1297 def closesbranch(self):
1299 def closesbranch(self):
1298 return 'close' in self._extra
1300 return 'close' in self._extra
1299 def extra(self):
1301 def extra(self):
1300 return self._extra
1302 return self._extra
1301
1303
1302 def tags(self):
1304 def tags(self):
1303 return []
1305 return []
1304
1306
1305 def bookmarks(self):
1307 def bookmarks(self):
1306 b = []
1308 b = []
1307 for p in self.parents():
1309 for p in self.parents():
1308 b.extend(p.bookmarks())
1310 b.extend(p.bookmarks())
1309 return b
1311 return b
1310
1312
1311 def phase(self):
1313 def phase(self):
1312 phase = phases.draft # default phase to draft
1314 phase = phases.draft # default phase to draft
1313 for p in self.parents():
1315 for p in self.parents():
1314 phase = max(phase, p.phase())
1316 phase = max(phase, p.phase())
1315 return phase
1317 return phase
1316
1318
1317 def hidden(self):
1319 def hidden(self):
1318 return False
1320 return False
1319
1321
1320 def children(self):
1322 def children(self):
1321 return []
1323 return []
1322
1324
1323 def flags(self, path):
1325 def flags(self, path):
1324 if '_manifest' in self.__dict__:
1326 if '_manifest' in self.__dict__:
1325 try:
1327 try:
1326 return self._manifest.flags(path)
1328 return self._manifest.flags(path)
1327 except KeyError:
1329 except KeyError:
1328 return ''
1330 return ''
1329
1331
1330 try:
1332 try:
1331 return self._flagfunc(path)
1333 return self._flagfunc(path)
1332 except OSError:
1334 except OSError:
1333 return ''
1335 return ''
1334
1336
1335 def ancestor(self, c2):
1337 def ancestor(self, c2):
1336 """return the "best" ancestor context of self and c2"""
1338 """return the "best" ancestor context of self and c2"""
1337 return self._parents[0].ancestor(c2) # punt on two parents for now
1339 return self._parents[0].ancestor(c2) # punt on two parents for now
1338
1340
1339 def walk(self, match):
1341 def walk(self, match):
1340 '''Generates matching file names.'''
1342 '''Generates matching file names.'''
1341 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1343 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1342 True, False))
1344 True, False))
1343
1345
1344 def matches(self, match):
1346 def matches(self, match):
1345 return sorted(self._repo.dirstate.matches(match))
1347 return sorted(self._repo.dirstate.matches(match))
1346
1348
1347 def ancestors(self):
1349 def ancestors(self):
1348 for p in self._parents:
1350 for p in self._parents:
1349 yield p
1351 yield p
1350 for a in self._repo.changelog.ancestors(
1352 for a in self._repo.changelog.ancestors(
1351 [p.rev() for p in self._parents]):
1353 [p.rev() for p in self._parents]):
1352 yield changectx(self._repo, a)
1354 yield changectx(self._repo, a)
1353
1355
1354 def markcommitted(self, node):
1356 def markcommitted(self, node):
1355 """Perform post-commit cleanup necessary after committing this ctx
1357 """Perform post-commit cleanup necessary after committing this ctx
1356
1358
1357 Specifically, this updates backing stores this working context
1359 Specifically, this updates backing stores this working context
1358 wraps to reflect the fact that the changes reflected by this
1360 wraps to reflect the fact that the changes reflected by this
1359 workingctx have been committed. For example, it marks
1361 workingctx have been committed. For example, it marks
1360 modified and added files as normal in the dirstate.
1362 modified and added files as normal in the dirstate.
1361
1363
1362 """
1364 """
1363
1365
1364 self._repo.dirstate.beginparentchange()
1366 self._repo.dirstate.beginparentchange()
1365 for f in self.modified() + self.added():
1367 for f in self.modified() + self.added():
1366 self._repo.dirstate.normal(f)
1368 self._repo.dirstate.normal(f)
1367 for f in self.removed():
1369 for f in self.removed():
1368 self._repo.dirstate.drop(f)
1370 self._repo.dirstate.drop(f)
1369 self._repo.dirstate.setparents(node)
1371 self._repo.dirstate.setparents(node)
1370 self._repo.dirstate.endparentchange()
1372 self._repo.dirstate.endparentchange()
1371
1373
1372 # write changes out explicitly, because nesting wlock at
1374 # write changes out explicitly, because nesting wlock at
1373 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1375 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1374 # from immediately doing so for subsequent changing files
1376 # from immediately doing so for subsequent changing files
1375 self._repo.dirstate.write(self._repo.currenttransaction())
1377 self._repo.dirstate.write(self._repo.currenttransaction())
1376
1378
1377 class workingctx(committablectx):
1379 class workingctx(committablectx):
1378 """A workingctx object makes access to data related to
1380 """A workingctx object makes access to data related to
1379 the current working directory convenient.
1381 the current working directory convenient.
1380 date - any valid date string or (unixtime, offset), or None.
1382 date - any valid date string or (unixtime, offset), or None.
1381 user - username string, or None.
1383 user - username string, or None.
1382 extra - a dictionary of extra values, or None.
1384 extra - a dictionary of extra values, or None.
1383 changes - a list of file lists as returned by localrepo.status()
1385 changes - a list of file lists as returned by localrepo.status()
1384 or None to use the repository status.
1386 or None to use the repository status.
1385 """
1387 """
1386 def __init__(self, repo, text="", user=None, date=None, extra=None,
1388 def __init__(self, repo, text="", user=None, date=None, extra=None,
1387 changes=None):
1389 changes=None):
1388 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1390 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1389
1391
1390 def __iter__(self):
1392 def __iter__(self):
1391 d = self._repo.dirstate
1393 d = self._repo.dirstate
1392 for f in d:
1394 for f in d:
1393 if d[f] != 'r':
1395 if d[f] != 'r':
1394 yield f
1396 yield f
1395
1397
1396 def __contains__(self, key):
1398 def __contains__(self, key):
1397 return self._repo.dirstate[key] not in "?r"
1399 return self._repo.dirstate[key] not in "?r"
1398
1400
1399 def hex(self):
1401 def hex(self):
1400 return hex(wdirid)
1402 return hex(wdirid)
1401
1403
1402 @propertycache
1404 @propertycache
1403 def _parents(self):
1405 def _parents(self):
1404 p = self._repo.dirstate.parents()
1406 p = self._repo.dirstate.parents()
1405 if p[1] == nullid:
1407 if p[1] == nullid:
1406 p = p[:-1]
1408 p = p[:-1]
1407 return [changectx(self._repo, x) for x in p]
1409 return [changectx(self._repo, x) for x in p]
1408
1410
1409 def filectx(self, path, filelog=None):
1411 def filectx(self, path, filelog=None):
1410 """get a file context from the working directory"""
1412 """get a file context from the working directory"""
1411 return workingfilectx(self._repo, path, workingctx=self,
1413 return workingfilectx(self._repo, path, workingctx=self,
1412 filelog=filelog)
1414 filelog=filelog)
1413
1415
1414 def dirty(self, missing=False, merge=True, branch=True):
1416 def dirty(self, missing=False, merge=True, branch=True):
1415 "check whether a working directory is modified"
1417 "check whether a working directory is modified"
1416 # check subrepos first
1418 # check subrepos first
1417 for s in sorted(self.substate):
1419 for s in sorted(self.substate):
1418 if self.sub(s).dirty():
1420 if self.sub(s).dirty():
1419 return True
1421 return True
1420 # check current working dir
1422 # check current working dir
1421 return ((merge and self.p2()) or
1423 return ((merge and self.p2()) or
1422 (branch and self.branch() != self.p1().branch()) or
1424 (branch and self.branch() != self.p1().branch()) or
1423 self.modified() or self.added() or self.removed() or
1425 self.modified() or self.added() or self.removed() or
1424 (missing and self.deleted()))
1426 (missing and self.deleted()))
1425
1427
1426 def add(self, list, prefix=""):
1428 def add(self, list, prefix=""):
1427 join = lambda f: os.path.join(prefix, f)
1429 join = lambda f: os.path.join(prefix, f)
1428 with self._repo.wlock():
1430 with self._repo.wlock():
1429 ui, ds = self._repo.ui, self._repo.dirstate
1431 ui, ds = self._repo.ui, self._repo.dirstate
1430 rejected = []
1432 rejected = []
1431 lstat = self._repo.wvfs.lstat
1433 lstat = self._repo.wvfs.lstat
1432 for f in list:
1434 for f in list:
1433 scmutil.checkportable(ui, join(f))
1435 scmutil.checkportable(ui, join(f))
1434 try:
1436 try:
1435 st = lstat(f)
1437 st = lstat(f)
1436 except OSError:
1438 except OSError:
1437 ui.warn(_("%s does not exist!\n") % join(f))
1439 ui.warn(_("%s does not exist!\n") % join(f))
1438 rejected.append(f)
1440 rejected.append(f)
1439 continue
1441 continue
1440 if st.st_size > 10000000:
1442 if st.st_size > 10000000:
1441 ui.warn(_("%s: up to %d MB of RAM may be required "
1443 ui.warn(_("%s: up to %d MB of RAM may be required "
1442 "to manage this file\n"
1444 "to manage this file\n"
1443 "(use 'hg revert %s' to cancel the "
1445 "(use 'hg revert %s' to cancel the "
1444 "pending addition)\n")
1446 "pending addition)\n")
1445 % (f, 3 * st.st_size // 1000000, join(f)))
1447 % (f, 3 * st.st_size // 1000000, join(f)))
1446 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1448 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1447 ui.warn(_("%s not added: only files and symlinks "
1449 ui.warn(_("%s not added: only files and symlinks "
1448 "supported currently\n") % join(f))
1450 "supported currently\n") % join(f))
1449 rejected.append(f)
1451 rejected.append(f)
1450 elif ds[f] in 'amn':
1452 elif ds[f] in 'amn':
1451 ui.warn(_("%s already tracked!\n") % join(f))
1453 ui.warn(_("%s already tracked!\n") % join(f))
1452 elif ds[f] == 'r':
1454 elif ds[f] == 'r':
1453 ds.normallookup(f)
1455 ds.normallookup(f)
1454 else:
1456 else:
1455 ds.add(f)
1457 ds.add(f)
1456 return rejected
1458 return rejected
1457
1459
1458 def forget(self, files, prefix=""):
1460 def forget(self, files, prefix=""):
1459 join = lambda f: os.path.join(prefix, f)
1461 join = lambda f: os.path.join(prefix, f)
1460 with self._repo.wlock():
1462 with self._repo.wlock():
1461 rejected = []
1463 rejected = []
1462 for f in files:
1464 for f in files:
1463 if f not in self._repo.dirstate:
1465 if f not in self._repo.dirstate:
1464 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1466 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1465 rejected.append(f)
1467 rejected.append(f)
1466 elif self._repo.dirstate[f] != 'a':
1468 elif self._repo.dirstate[f] != 'a':
1467 self._repo.dirstate.remove(f)
1469 self._repo.dirstate.remove(f)
1468 else:
1470 else:
1469 self._repo.dirstate.drop(f)
1471 self._repo.dirstate.drop(f)
1470 return rejected
1472 return rejected
1471
1473
1472 def undelete(self, list):
1474 def undelete(self, list):
1473 pctxs = self.parents()
1475 pctxs = self.parents()
1474 with self._repo.wlock():
1476 with self._repo.wlock():
1475 for f in list:
1477 for f in list:
1476 if self._repo.dirstate[f] != 'r':
1478 if self._repo.dirstate[f] != 'r':
1477 self._repo.ui.warn(_("%s not removed!\n") % f)
1479 self._repo.ui.warn(_("%s not removed!\n") % f)
1478 else:
1480 else:
1479 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1481 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1480 t = fctx.data()
1482 t = fctx.data()
1481 self._repo.wwrite(f, t, fctx.flags())
1483 self._repo.wwrite(f, t, fctx.flags())
1482 self._repo.dirstate.normal(f)
1484 self._repo.dirstate.normal(f)
1483
1485
1484 def copy(self, source, dest):
1486 def copy(self, source, dest):
1485 try:
1487 try:
1486 st = self._repo.wvfs.lstat(dest)
1488 st = self._repo.wvfs.lstat(dest)
1487 except OSError as err:
1489 except OSError as err:
1488 if err.errno != errno.ENOENT:
1490 if err.errno != errno.ENOENT:
1489 raise
1491 raise
1490 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1492 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1491 return
1493 return
1492 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1494 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1493 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1495 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1494 "symbolic link\n") % dest)
1496 "symbolic link\n") % dest)
1495 else:
1497 else:
1496 with self._repo.wlock():
1498 with self._repo.wlock():
1497 if self._repo.dirstate[dest] in '?':
1499 if self._repo.dirstate[dest] in '?':
1498 self._repo.dirstate.add(dest)
1500 self._repo.dirstate.add(dest)
1499 elif self._repo.dirstate[dest] in 'r':
1501 elif self._repo.dirstate[dest] in 'r':
1500 self._repo.dirstate.normallookup(dest)
1502 self._repo.dirstate.normallookup(dest)
1501 self._repo.dirstate.copy(source, dest)
1503 self._repo.dirstate.copy(source, dest)
1502
1504
1503 def match(self, pats=[], include=None, exclude=None, default='glob',
1505 def match(self, pats=[], include=None, exclude=None, default='glob',
1504 listsubrepos=False, badfn=None):
1506 listsubrepos=False, badfn=None):
1505 r = self._repo
1507 r = self._repo
1506
1508
1507 # Only a case insensitive filesystem needs magic to translate user input
1509 # Only a case insensitive filesystem needs magic to translate user input
1508 # to actual case in the filesystem.
1510 # to actual case in the filesystem.
1509 if not util.fscasesensitive(r.root):
1511 if not util.fscasesensitive(r.root):
1510 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1512 return matchmod.icasefsmatcher(r.root, r.getcwd(), pats, include,
1511 exclude, default, r.auditor, self,
1513 exclude, default, r.auditor, self,
1512 listsubrepos=listsubrepos,
1514 listsubrepos=listsubrepos,
1513 badfn=badfn)
1515 badfn=badfn)
1514 return matchmod.match(r.root, r.getcwd(), pats,
1516 return matchmod.match(r.root, r.getcwd(), pats,
1515 include, exclude, default,
1517 include, exclude, default,
1516 auditor=r.auditor, ctx=self,
1518 auditor=r.auditor, ctx=self,
1517 listsubrepos=listsubrepos, badfn=badfn)
1519 listsubrepos=listsubrepos, badfn=badfn)
1518
1520
1519 def _filtersuspectsymlink(self, files):
1521 def _filtersuspectsymlink(self, files):
1520 if not files or self._repo.dirstate._checklink:
1522 if not files or self._repo.dirstate._checklink:
1521 return files
1523 return files
1522
1524
1523 # Symlink placeholders may get non-symlink-like contents
1525 # Symlink placeholders may get non-symlink-like contents
1524 # via user error or dereferencing by NFS or Samba servers,
1526 # via user error or dereferencing by NFS or Samba servers,
1525 # so we filter out any placeholders that don't look like a
1527 # so we filter out any placeholders that don't look like a
1526 # symlink
1528 # symlink
1527 sane = []
1529 sane = []
1528 for f in files:
1530 for f in files:
1529 if self.flags(f) == 'l':
1531 if self.flags(f) == 'l':
1530 d = self[f].data()
1532 d = self[f].data()
1531 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1533 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1532 self._repo.ui.debug('ignoring suspect symlink placeholder'
1534 self._repo.ui.debug('ignoring suspect symlink placeholder'
1533 ' "%s"\n' % f)
1535 ' "%s"\n' % f)
1534 continue
1536 continue
1535 sane.append(f)
1537 sane.append(f)
1536 return sane
1538 return sane
1537
1539
    def _checklookup(self, files):
        """Split ``files`` into really-modified and actually-clean lists.

        ``files`` are paths whose dirstate entries were inconclusive (the
        "lookup" bucket).  Each is compared against the first parent; the
        return value is a pair ``(modified, fixup)`` where ``fixup`` lists
        files that turned out to be unchanged and whose dirstate entries
        were (best-effort) marked clean.
        """
        # check for any possibly clean files
        if not files:
            return [], []

        modified = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            # a file counts as modified if it is absent from the parent,
            # its flags differ, or its content differs
            if (f not in pctx or self.flags(f) != pctx.flags(f)
                or pctx[f].cmp(self[f])):
                modified.append(f)
            else:
                fixup.append(f)

        # update dirstate for files that are actually clean
        if fixup:
            try:
                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    normal = self._repo.dirstate.normal
                    for f in fixup:
                        normal(f)
                    # write changes out explicitly, because nesting
                    # wlock at runtime may prevent 'wlock.release()'
                    # after this block from doing so for subsequent
                    # changing files
                    self._repo.dirstate.write(self._repo.currenttransaction())
            except error.LockError:
                # best effort only: the caller still gets correct results
                pass
        return modified, fixup
1573
1575
1574 def _manifestmatches(self, match, s):
1576 def _manifestmatches(self, match, s):
1575 """Slow path for workingctx
1577 """Slow path for workingctx
1576
1578
1577 The fast path is when we compare the working directory to its parent
1579 The fast path is when we compare the working directory to its parent
1578 which means this function is comparing with a non-parent; therefore we
1580 which means this function is comparing with a non-parent; therefore we
1579 need to build a manifest and return what matches.
1581 need to build a manifest and return what matches.
1580 """
1582 """
1581 mf = self._repo['.']._manifestmatches(match, s)
1583 mf = self._repo['.']._manifestmatches(match, s)
1582 for f in s.modified + s.added:
1584 for f in s.modified + s.added:
1583 mf[f] = _newnode
1585 mf[f] = _newnode
1584 mf.setflag(f, self.flags(f))
1586 mf.setflag(f, self.flags(f))
1585 for f in s.removed:
1587 for f in s.removed:
1586 if f in mf:
1588 if f in mf:
1587 del mf[f]
1589 del mf[f]
1588 return mf
1590 return mf
1589
1591
    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        listignored, listclean, listunknown = ignored, clean, unknown
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        # 'cmp' lists files the dirstate could not classify cheaply and
        # which therefore need a content comparison; 's' is the status
        cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
                                            listclean, listunknown)

        # check for any possibly clean files
        if cmp:
            modified2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)

            # update dirstate for files that are actually clean
            if fixup and listclean:
                s.clean.extend(fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s
1620
1622
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # the dirstate-derived status supersedes the 's' passed in
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            # slow path: fall back to the generic manifest comparison
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s
1640
1642
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        superself = super(workingctx, self)
        match = superself._matchstatus(other, match)
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            # note: 'bad' closes over 'other'; only install it when 'other'
            # is not the working directory's parent
            match.bad = bad
        return match
1662
1664
class committablefilectx(basefilectx):
    """Shared behavior for file contexts that can be committed.

    Concrete subclasses include workingfilectx and memfilectx.
    """
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents

        renamed = self.renamed()
        if renamed:
            # the copy source replaces the first-parent entry
            entries = [renamed + (None,)]
        else:
            entries = [(path, nodefor(pcl[0], path), fl)]
        entries.extend((path, nodefor(pc, path), fl) for pc in pcl[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in entries if n != nullid]

    def children(self):
        # nothing has been committed on top of this context yet
        return []
1707
1709
class workingfilectx(committablefilectx):
    """File context giving convenient access to data about one file in
    the working directory."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        srcnode = self._changectx._parents[0]._manifest.get(source, nullid)
        return source, srcnode

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        committime, tzoffset = self._changectx.date()
        try:
            mtime = self._repo.wvfs.lstat(self._path).st_mtime
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # file vanished: fall back to the changectx's timestamp
            return (committime, tzoffset)
        return (mtime, tzoffset)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        util.unlinkpath(self._repo.wjoin(self._path), ignoremissing)

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags)
1753
1755
class workingcommitctx(workingctx):
    """Convenient access to data about the revision being committed.

    Changes in the working directory that are not part of this commit
    are hidden by this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # note: super(workingctx, ...) deliberately skips
        # workingctx.__init__ in the MRO so 'changes' reaches the
        # committablectx constructor directly
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match=None, ignored=False, clean=False,
                        unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        if clean:
            cleanfiles = [f for f in self._manifest
                          if f not in self._changedset]
        else:
            cleanfiles = []

        def matched(files):
            return [f for f in files if match(f)]

        return scmutil.status(matched(self._status.modified),
                              matched(self._status.added),
                              matched(self._status.removed),
                              [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added, self._status.removed)
        return changed
1791
1793
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: the common case after warm-up is a cache hit
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
1807
1809
1808 class memctx(committablectx):
1810 class memctx(committablectx):
1809 """Use memctx to perform in-memory commits via localrepo.commitctx().
1811 """Use memctx to perform in-memory commits via localrepo.commitctx().
1810
1812
1811 Revision information is supplied at initialization time while
1813 Revision information is supplied at initialization time while
1812 related files data and is made available through a callback
1814 related files data and is made available through a callback
1813 mechanism. 'repo' is the current localrepo, 'parents' is a
1815 mechanism. 'repo' is the current localrepo, 'parents' is a
1814 sequence of two parent revisions identifiers (pass None for every
1816 sequence of two parent revisions identifiers (pass None for every
1815 missing parent), 'text' is the commit message and 'files' lists
1817 missing parent), 'text' is the commit message and 'files' lists
1816 names of files touched by the revision (normalized and relative to
1818 names of files touched by the revision (normalized and relative to
1817 repository root).
1819 repository root).
1818
1820
1819 filectxfn(repo, memctx, path) is a callable receiving the
1821 filectxfn(repo, memctx, path) is a callable receiving the
1820 repository, the current memctx object and the normalized path of
1822 repository, the current memctx object and the normalized path of
1821 requested file, relative to repository root. It is fired by the
1823 requested file, relative to repository root. It is fired by the
1822 commit function for every file in 'files', but calls order is
1824 commit function for every file in 'files', but calls order is
1823 undefined. If the file is available in the revision being
1825 undefined. If the file is available in the revision being
1824 committed (updated or added), filectxfn returns a memfilectx
1826 committed (updated or added), filectxfn returns a memfilectx
1825 object. If the file was removed, filectxfn raises an
1827 object. If the file was removed, filectxfn raises an
1826 IOError. Moved files are represented by marking the source file
1828 IOError. Moved files are represented by marking the source file
1827 removed and the new file added with copy information (see
1829 removed and the new file added with copy information (see
1828 memfilectx).
1830 memfilectx).
1829
1831
1830 user receives the committer name and defaults to current
1832 user receives the committer name and defaults to current
1831 repository username, date is the commit date in any format
1833 repository username, date is the commit date in any format
1832 supported by util.parsedate() and defaults to current date, extra
1834 supported by util.parsedate() and defaults to current date, extra
1833 is a dictionary of metadata or is left empty.
1835 is a dictionary of metadata or is left empty.
1834 """
1836 """
1835
1837
1836 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1838 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
1837 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1839 # Extensions that need to retain compatibility across Mercurial 3.1 can use
1838 # this field to determine what to do in filectxfn.
1840 # this field to determine what to do in filectxfn.
1839 _returnnoneformissingfiles = True
1841 _returnnoneformissingfiles = True
1840
1842
1841 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1843 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1842 date=None, extra=None, editor=False):
1844 date=None, extra=None, editor=False):
1843 super(memctx, self).__init__(repo, text, user, date, extra)
1845 super(memctx, self).__init__(repo, text, user, date, extra)
1844 self._rev = None
1846 self._rev = None
1845 self._node = None
1847 self._node = None
1846 parents = [(p or nullid) for p in parents]
1848 parents = [(p or nullid) for p in parents]
1847 p1, p2 = parents
1849 p1, p2 = parents
1848 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1850 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1849 files = sorted(set(files))
1851 files = sorted(set(files))
1850 self._files = files
1852 self._files = files
1851 self.substate = {}
1853 self.substate = {}
1852
1854
1853 # if store is not callable, wrap it in a function
1855 # if store is not callable, wrap it in a function
1854 if not callable(filectxfn):
1856 if not callable(filectxfn):
1855 def getfilectx(repo, memctx, path):
1857 def getfilectx(repo, memctx, path):
1856 fctx = filectxfn[path]
1858 fctx = filectxfn[path]
1857 # this is weird but apparently we only keep track of one parent
1859 # this is weird but apparently we only keep track of one parent
1858 # (why not only store that instead of a tuple?)
1860 # (why not only store that instead of a tuple?)
1859 copied = fctx.renamed()
1861 copied = fctx.renamed()
1860 if copied:
1862 if copied:
1861 copied = copied[0]
1863 copied = copied[0]
1862 return memfilectx(repo, path, fctx.data(),
1864 return memfilectx(repo, path, fctx.data(),
1863 islink=fctx.islink(), isexec=fctx.isexec(),
1865 islink=fctx.islink(), isexec=fctx.isexec(),
1864 copied=copied, memctx=memctx)
1866 copied=copied, memctx=memctx)
1865 self._filectxfn = getfilectx
1867 self._filectxfn = getfilectx
1866 else:
1868 else:
1867 # memoizing increases performance for e.g. vcs convert scenarios.
1869 # memoizing increases performance for e.g. vcs convert scenarios.
1868 self._filectxfn = makecachingfilectxfn(filectxfn)
1870 self._filectxfn = makecachingfilectxfn(filectxfn)
1869
1871
1870 if extra:
1872 if extra:
1871 self._extra = extra.copy()
1873 self._extra = extra.copy()
1872 else:
1874 else:
1873 self._extra = {}
1875 self._extra = {}
1874
1876
1875 if self._extra.get('branch', '') == '':
1877 if self._extra.get('branch', '') == '':
1876 self._extra['branch'] = 'default'
1878 self._extra['branch'] = 'default'
1877
1879
1878 if editor:
1880 if editor:
1879 self._text = editor(self._repo, self, [])
1881 self._text = editor(self._repo, self, [])
1880 self._repo.savecommitmessage(self._text)
1882 self._repo.savecommitmessage(self._text)
1881
1883
1882 def filectx(self, path, filelog=None):
1884 def filectx(self, path, filelog=None):
1883 """get a file context from the working directory
1885 """get a file context from the working directory
1884
1886
1885 Returns None if file doesn't exist and should be removed."""
1887 Returns None if file doesn't exist and should be removed."""
1886 return self._filectxfn(self._repo, self, path)
1888 return self._filectxfn(self._repo, self, path)
1887
1889
1888 def commit(self):
1890 def commit(self):
1889 """commit context to the repo"""
1891 """commit context to the repo"""
1890 return self._repo.commitctx(self)
1892 return self._repo.commitctx(self)
1891
1893
    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            # recompute the filenode from the in-memory content plus the
            # parent filenodes found above
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # added files have no filenode parents
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # tracked by neither parent -> newly added
                added.append(f)
            elif self[f]:
                # filectxfn produced a file context -> content change
                modified.append(f)
            else:
                # filectxfn returned None -> file is to be removed
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        # manifest flag characters: 'l' = symlink, 'x' = executable
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            # (source path, nullid): the copy source has no filenode yet
            self._copied = (copied, nullid)

    def data(self):
        return self._data
    def size(self):
        # size of the in-memory content in bytes
        return len(self.data())
    def flags(self):
        return self._flags
    def renamed(self):
        # (source, node) tuple if this file was copied, else None
        return self._copied

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data
@@ -1,1562 +1,1559 b''
1 # manifest.py - manifest revision class for mercurial
1 # manifest.py - manifest revision class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import array
10 import array
11 import heapq
11 import heapq
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from . import (
16 from . import (
17 error,
17 error,
18 mdiff,
18 mdiff,
19 parsers,
19 parsers,
20 revlog,
20 revlog,
21 util,
21 util,
22 )
22 )
23
23
# shorthand for util.propertycache, used by the classes below
propertycache = util.propertycache
25
25
def _parsev1(data):
    """Parse a v1 manifest text, yielding (path, node, flags) tuples."""
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for l in data.splitlines():
        if prev is not None and prev > l:
            raise ValueError('Manifest lines not in sorted order.')
        prev = l
        f, n = l.split('\0')
        if len(n) > 40:
            # anything after the 40 hex digits is the flags suffix
            yield f, revlog.bin(n[:40]), n[40:]
        else:
            yield f, revlog.bin(n), ''

45 def _parsev2(data):
45 def _parsev2(data):
46 metadataend = data.find('\n')
46 metadataend = data.find('\n')
47 # Just ignore metadata for now
47 # Just ignore metadata for now
48 pos = metadataend + 1
48 pos = metadataend + 1
49 prevf = ''
49 prevf = ''
50 while pos < len(data):
50 while pos < len(data):
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 if end == -1:
52 if end == -1:
53 raise ValueError('Manifest ended with incomplete file entry.')
53 raise ValueError('Manifest ended with incomplete file entry.')
54 stemlen = ord(data[pos])
54 stemlen = ord(data[pos])
55 items = data[pos + 1:end].split('\0')
55 items = data[pos + 1:end].split('\0')
56 f = prevf[:stemlen] + items[0]
56 f = prevf[:stemlen] + items[0]
57 if prevf > f:
57 if prevf > f:
58 raise ValueError('Manifest entries not in sorted order.')
58 raise ValueError('Manifest entries not in sorted order.')
59 fl = items[1]
59 fl = items[1]
60 # Just ignore metadata (items[2:] for now)
60 # Just ignore metadata (items[2:] for now)
61 n = data[end + 1:end + 21]
61 n = data[end + 1:end + 21]
62 yield f, n, fl
62 yield f, n, fl
63 pos = end + 22
63 pos = end + 22
64 prevf = f
64 prevf = f
65
65
def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text.

    Dispatches on the leading NUL byte that marks the v2 format.
    """
    parsefn = _parsev2 if data.startswith('\0') else _parsev1
    return iter(parsefn(data))

def _text(it, usemanifestv2):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text, in v2 or v1 format depending on ``usemanifestv2``."""
    if not usemanifestv2:
        return _textv1(it)
    return _textv2(it)

def _textv1(it):
    """Render (path, node, flags) tuples as v1 manifest text."""
    hexfn = revlog.hex
    files = []
    lines = []
    for path, node, flags in it:
        files.append(path)
        # if this is changed to support newlines in filenames,
        # be sure to check the templates/ dir again (especially *-raw.tmpl)
        lines.append("%s\0%s%s\n" % (path, hexfn(node), flags))

    _checkforbidden(files)
    return ''.join(lines)

def _textv2(it):
    """Render (path, node, flags) tuples as v2 (stem-compressed) text."""
    files = []
    lines = ['\0\n']
    lastpath = ''
    for path, node, flags in it:
        files.append(path)
        # share the longest common leading prefix with the previous path
        common = os.path.commonprefix([lastpath, path])
        stemlen = min(len(common), 255)
        lines.append("%c%s\0%s\n%s\n" % (stemlen, path[stemlen:], flags, node))
        lastpath = path
    _checkforbidden(files)
    return ''.join(lines)

107 class lazymanifestiter(object):
107 class lazymanifestiter(object):
108 def __init__(self, lm):
108 def __init__(self, lm):
109 self.pos = 0
109 self.pos = 0
110 self.lm = lm
110 self.lm = lm
111
111
112 def __iter__(self):
112 def __iter__(self):
113 return self
113 return self
114
114
115 def next(self):
115 def next(self):
116 try:
116 try:
117 data, pos = self.lm._get(self.pos)
117 data, pos = self.lm._get(self.pos)
118 except IndexError:
118 except IndexError:
119 raise StopIteration
119 raise StopIteration
120 if pos == -1:
120 if pos == -1:
121 self.pos += 1
121 self.pos += 1
122 return data[0]
122 return data[0]
123 self.pos += 1
123 self.pos += 1
124 zeropos = data.find('\x00', pos)
124 zeropos = data.find('\x00', pos)
125 return data[pos:zeropos]
125 return data[pos:zeropos]
126
126
class lazymanifestiterentries(object):
    """Iterate over (path, node, flags) tuples of a _lazymanifest.

    Implements the Python 2 iterator protocol (``next`` rather than
    ``__next__``).
    """
    def __init__(self, lm):
        self.lm = lm
        self.pos = 0

    def __iter__(self):
        return self

    def next(self):
        try:
            data, pos = self.lm._get(self.pos)
        except IndexError:
            # walked off the end of the manifest
            raise StopIteration
        if pos == -1:
            # extradata entry: already stored as a (path, node, flags) tuple
            self.pos += 1
            return data
        # in-data entry: path, then 40 hex node digits, then flags
        zeropos = data.find('\x00', pos)
        hashval = unhexlify(data, self.lm.extrainfo[self.pos],
                            zeropos + 1, 40)
        flags = self.lm._getflags(data, self.pos, zeropos)
        self.pos += 1
        return (data[pos:zeropos], hashval, flags)

def unhexlify(data, extra, pos, length):
    """Decode ``length`` hex digits of ``data`` starting at ``pos``.

    ``extra``, if nonzero, contributes one additional raw byte appended to
    the decoded node (used for nodes longer than 20 bytes).  Python 2
    only: relies on ``str.decode('hex')``.
    """
    s = data[pos:pos + length].decode('hex')
    if extra:
        s += chr(extra & 0xff)
    return s

156 def _cmp(a, b):
156 def _cmp(a, b):
157 return (a > b) - (a < b)
157 return (a > b) - (a < b)
158
158
class _lazymanifest(object):
    """Pure-Python fallback for parsers.lazymanifest.

    The manifest text is kept verbatim in ``data``.  ``positions`` holds,
    for each entry in sorted path order, either a byte offset into
    ``data`` (value >= 0) or ``-(index + 1)`` into ``extradata`` where
    added/updated entries live as (path, node, flags) tuples until
    ``_compact()`` folds them back into ``data``.  ``extrainfo`` carries
    the extra node byte for entries whose node is longer than 20 bytes
    (0 otherwise).
    """
    def __init__(self, data, positions=None, extrainfo=None, extradata=None):
        if positions is None:
            self.positions = self.findlines(data)
            self.extrainfo = [0] * len(self.positions)
            self.data = data
            self.extradata = []
        else:
            # clone path (used by copy()): defensive copies so the clone
            # can be mutated independently
            self.positions = positions[:]
            self.extrainfo = extrainfo[:]
            self.extradata = extradata[:]
            self.data = data

    def findlines(self, data):
        """Return the offsets of all line starts, validating that the text
        ends in a newline and that paths are in sorted order."""
        if not data:
            return []
        pos = data.find("\n")
        if pos == -1 or data[-1] != '\n':
            raise ValueError("Manifest did not end in a newline.")
        positions = [0]
        prev = data[:data.find('\x00')]
        while pos < len(data) - 1 and pos != -1:
            positions.append(pos + 1)
            nexts = data[pos + 1:data.find('\x00', pos + 1)]
            if nexts < prev:
                raise ValueError("Manifest lines not in sorted order.")
            prev = nexts
            pos = data.find("\n", pos + 1)
        return positions

    def _get(self, index):
        # get the position encoded in pos:
        # positive number is an index in 'data'
        # negative number is in extrapieces
        pos = self.positions[index]
        if pos >= 0:
            return self.data, pos
        # extradata entries are signalled to callers with pos == -1
        return self.extradata[-pos - 1], -1

    def _getkey(self, pos):
        """Return the path for an entry given its encoded position."""
        if pos >= 0:
            return self.data[pos:self.data.find('\x00', pos + 1)]
        return self.extradata[-pos - 1][0]

    def bsearch(self, key):
        """Binary search; return the entry index of ``key``, or -1."""
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return midpoint
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        return -1

    def bsearch2(self, key):
        # same as the above, but will always return the position
        # done for performance reasons
        first = 0
        last = len(self.positions) - 1

        while first <= last:
            midpoint = (first + last)//2
            nextpos = self.positions[midpoint]
            candidate = self._getkey(nextpos)
            r = _cmp(key, candidate)
            if r == 0:
                return (midpoint, True)
            else:
                if r < 0:
                    last = midpoint - 1
                else:
                    first = midpoint + 1
        # (insertion point, found flag)
        return (first, False)

    def __contains__(self, key):
        return self.bsearch(key) != -1

    def _getflags(self, data, needle, pos):
        """Return the flags suffix of the entry whose NUL byte is at ``pos``.

        Layout after the NUL: 40 hex node digits, then the flags, then the
        newline -- hence the +41.

        NOTE(review): the final line reads from self.data even though a
        ``data`` argument is passed in; all visible callers pass
        data is self.data for in-data entries, so the two coincide --
        confirm before calling with extradata.  ``needle`` is unused.
        """
        start = pos + 41
        end = data.find("\n", start)
        if end == -1:
            end = len(data) - 1
        if start == end:
            return ''
        return self.data[start:end]

    def __getitem__(self, key):
        """Return (node, flags) for ``key``; raise KeyError if absent."""
        if not isinstance(key, str):
            raise TypeError("getitem: manifest keys must be a string.")
        needle = self.bsearch(key)
        if needle == -1:
            raise KeyError
        data, pos = self._get(needle)
        if pos == -1:
            # extradata entry: (path, node, flags)
            return (data[1], data[2])
        zeropos = data.find('\x00', pos)
        assert 0 <= needle <= len(self.positions)
        assert len(self.extrainfo) == len(self.positions)
        hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
        flags = self._getflags(data, needle, zeropos)
        return (hashval, flags)

    def __delitem__(self, key):
        needle, found = self.bsearch2(key)
        if not found:
            raise KeyError
        cur = self.positions[needle]
        self.positions = self.positions[:needle] + self.positions[needle + 1:]
        self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
        if cur >= 0:
            # blank the first byte of the now-unreferenced line in data
            self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]

    def __setitem__(self, key, value):
        if not isinstance(key, str):
            raise TypeError("setitem: manifest keys must be a string.")
        if not isinstance(value, tuple) or len(value) != 2:
            raise TypeError("Manifest values must be a tuple of (node, flags).")
        hashval = value[0]
        if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
            raise TypeError("node must be a 20-byte string")
        flags = value[1]
        if len(hashval) == 22:
            # drop the trailing byte; a 21st byte survives via extrainfo
            # after _compact()
            hashval = hashval[:-1]
        if not isinstance(flags, str) or len(flags) > 1:
            raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
        needle, found = self.bsearch2(key)
        if found:
            # put the item
            pos = self.positions[needle]
            if pos < 0:
                self.extradata[-pos - 1] = (key, hashval, value[1])
            else:
                # just don't bother
                self.extradata.append((key, hashval, value[1]))
                self.positions[needle] = -len(self.extradata)
        else:
            # not found, put it in with extra positions
            self.extradata.append((key, hashval, value[1]))
            self.positions = (self.positions[:needle] + [-len(self.extradata)]
                              + self.positions[needle:])
            self.extrainfo = (self.extrainfo[:needle] + [0] +
                              self.extrainfo[needle:])

    def copy(self):
        # XXX call _compact like in C?
        return _lazymanifest(self.data, self.positions, self.extrainfo,
                             self.extradata)

    def _compact(self):
        # Fold all extradata entries back into a single manifest text.
        # hopefully not called TOO often
        if len(self.extradata) == 0:
            return
        l = []
        last_cut = 0
        i = 0
        offset = 0
        self.extrainfo = [0] * len(self.positions)
        while i < len(self.positions):
            if self.positions[i] >= 0:
                cur = self.positions[i]
                last_cut = cur
                # walk a run of consecutive in-data entries so the whole
                # byte range can be copied in one slice
                while True:
                    self.positions[i] = offset
                    i += 1
                    if i == len(self.positions) or self.positions[i] < 0:
                        break
                    offset += self.positions[i] - cur
                    cur = self.positions[i]
                end_cut = self.data.find('\n', cur)
                if end_cut != -1:
                    end_cut += 1
                offset += end_cut - cur
                l.append(self.data[last_cut:end_cut])
            else:
                # serialize pending extradata entries at this point
                while i < len(self.positions) and self.positions[i] < 0:
                    cur = self.positions[i]
                    t = self.extradata[-cur - 1]
                    l.append(self._pack(t))
                    self.positions[i] = offset
                    if len(t[1]) > 20:
                        # NOTE(review): t[1][21] indexes the 22nd byte but
                        # __setitem__ truncates stored nodes to 21 bytes --
                        # looks like it would IndexError for a 21-byte node;
                        # confirm against the C implementation
                        self.extrainfo[i] = ord(t[1][21])
                    offset += len(l[-1])
                    i += 1
        self.data = ''.join(l)
        self.extradata = []

    def _pack(self, d):
        """Serialize a (path, node, flags) tuple as a v1 manifest line."""
        return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'

    def text(self):
        self._compact()
        return self.data

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.'''
        # XXX think whether efficiency matters here
        diff = {}

        for fn, e1, flags in self.iterentries():
            if fn not in m2:
                diff[fn] = (e1, flags), (None, '')
            else:
                e2 = m2[fn]
                if (e1, flags) != e2:
                    diff[fn] = (e1, flags), e2
                elif clean:
                    diff[fn] = None

        for fn, e2, flags in m2.iterentries():
            if fn not in self:
                diff[fn] = (None, ''), (e2, flags)

        return diff

    def iterentries(self):
        return lazymanifestiterentries(self)

    def iterkeys(self):
        return lazymanifestiter(self)

    def __iter__(self):
        return lazymanifestiter(self)

    def __len__(self):
        return len(self.positions)

    def filtercopy(self, filterfn):
        """Return a new _lazymanifest with only the paths accepted by
        ``filterfn``."""
        # XXX should be optimized
        c = _lazymanifest('')
        for f, n, fl in self.iterentries():
            if filterfn(f):
                c[f] = n, fl
        return c

# prefer the C implementation of lazymanifest when the parsers module
# provides one; otherwise keep the pure-Python class defined above
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass

406 class manifestdict(object):
406 class manifestdict(object):
    def __init__(self, data=''):
        # v2 manifest texts start with a NUL byte (metadata line); the
        # lazymanifest parser only understands v1, so re-insert v2
        # entries one at a time
        if data.startswith('\0'):
            #_lazymanifest can not parse v2
            self._lm = _lazymanifest('')
            for f, n, fl in _parsev2(data):
                self._lm[f] = n, fl
        else:
            self._lm = _lazymanifest(data)

416 def __getitem__(self, key):
416 def __getitem__(self, key):
417 return self._lm[key][0]
417 return self._lm[key][0]
418
418
    def find(self, key):
        # return the full (node, flags) pair, unlike __getitem__ which
        # yields only the node; raises KeyError if the file is absent
        return self._lm[key]

    def __len__(self):
        # number of files in the manifest
        return len(self._lm)

    def __nonzero__(self):
        # Python 2 truth protocol (py3 would spell this __bool__).
        # nonzero is covered by the __len__ function, but implementing it here
        # makes it easier for extensions to override.
        return len(self._lm) != 0

    def __setitem__(self, key, node):
        # store the node while preserving any existing flags for the path
        # ('' when the file had none)
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        # membership is delegated to the underlying lazymanifest
        return key in self._lm

    def __delitem__(self, key):
        # raises KeyError if the file is absent (propagated from _lm)
        del self._lm[key]

    def __iter__(self):
        # iterate over file names (same iterator as iterkeys())
        return self._lm.__iter__()

    def iterkeys(self):
        # iterator over the file names in the manifest
        return self._lm.iterkeys()

445 def keys(self):
445 def keys(self):
446 return list(self.iterkeys())
446 return list(self.iterkeys())
447
447
    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        diff = self.diff(m2)
        # diff values are ((n1, fl1), (n2, fl2)); a None node on the m2
        # side means the file does not exist there
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        # lazily-built, membership-testable index of the directories
        # spanned by the manifest's file names (see util.dirs)
        return util.dirs(self)

    def dirs(self):
        # expose the cached directory index
        return self._dirs

    def hasdir(self, dir):
        # membership test against the lazily-built directory index
        return dir in self._dirs

466 def _filesfastpath(self, match):
466 def _filesfastpath(self, match):
467 '''Checks whether we can correctly and quickly iterate over matcher
467 '''Checks whether we can correctly and quickly iterate over matcher
468 files instead of over manifest files.'''
468 files instead of over manifest files.'''
469 files = match.files()
469 files = match.files()
470 return (len(files) < 100 and (match.isexact() or
470 return (len(files) < 100 and (match.isexact() or
471 (match.prefix() and all(fn in self for fn in files))))
471 (match.prefix() and all(fn in self for fn in files))))
472
472
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            # no filtering: every manifest entry matches
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # anything left in fset was requested but never matched; report it
        # as bad unless it names a directory that does exist
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

509 def matches(self, match):
509 def matches(self, match):
510 '''generate a new manifest filtered by the match argument'''
510 '''generate a new manifest filtered by the match argument'''
511 if match.always():
511 if match.always():
512 return self.copy()
512 return self.copy()
513
513
514 if self._filesfastpath(match):
514 if self._filesfastpath(match):
515 m = manifestdict()
515 m = manifestdict()
516 lm = self._lm
516 lm = self._lm
517 for fn in match.files():
517 for fn in match.files():
518 if fn in lm:
518 if fn in lm:
519 m._lm[fn] = lm[fn]
519 m._lm[fn] = lm[fn]
520 return m
520 return m
521
521
522 m = manifestdict()
522 m = manifestdict()
523 m._lm = self._lm.filtercopy(match)
523 m._lm = self._lm.filtercopy(match)
524 return m
524 return m
525
525
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        # the actual comparison is delegated to the lazymanifest layer
        return self._lm.diff(m2._lm, clean)
542
542
543 def setflag(self, key, flag):
543 def setflag(self, key, flag):
544 self._lm[key] = self[key], flag
544 self._lm[key] = self[key], flag
545
545
546 def get(self, key, default=None):
546 def get(self, key, default=None):
547 try:
547 try:
548 return self._lm[key][0]
548 return self._lm[key][0]
549 except KeyError:
549 except KeyError:
550 return default
550 return default
551
551
552 def flags(self, key, default=''):
552 def flags(self, key, default=''):
553 try:
553 try:
554 return self._lm[key][1]
554 return self._lm[key][1]
555 except KeyError:
555 except KeyError:
556 return default
556 return default
557
557
558 def copy(self):
558 def copy(self):
559 c = manifestdict()
559 c = manifestdict()
560 c._lm = self._lm.copy()
560 c._lm = self._lm.copy()
561 return c
561 return c
562
562
563 def iteritems(self):
563 def iteritems(self):
564 return (x[:2] for x in self._lm.iterentries())
564 return (x[:2] for x in self._lm.iterentries())
565
565
    def iterentries(self):
        # yields (path, node, flags) triples straight from the lazymanifest
        return self._lm.iterentries()
568
568
569 def text(self, usemanifestv2=False):
569 def text(self, usemanifestv2=False):
570 if usemanifestv2:
570 if usemanifestv2:
571 return _textv2(self._lm.iterentries())
571 return _textv2(self._lm.iterentries())
572 else:
572 else:
573 # use (probably) native version for v1
573 # use (probably) native version for v1
574 return self._lm.text()
574 return self._lm.text()
575
575
    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        Each change is a (filename, todelete) pair.  Returns an
        (arraytext, deltatext) pair: the full new manifest text as an
        array.array and the binary delta against *base*.
        """
        delta = []
        dstart = None
        dend = None
        # new-text line(s) belonging to the currently open delta chunk
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                            _("failed to remove %s from manifest") % f)
                    l = ""
                if dstart is not None and dstart <= start and dend >= start:
                    # adjacent/overlapping with the open chunk: extend it
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    # flush the open chunk and start a new one
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
627
627
def _msearch(m, s, lo=0, hi=None):
    '''return a tuple (start, end) that says where to find s within m.

    If the string is found m[start:end] are the line containing
    that string.  If start == end the string was not found and
    they indicate the proper sorted insertion point.

    m should be a buffer or a string
    s is a string'''
    def advance(i, c):
        # advance i to the next occurrence of character c (or end of m)
        while i < lenm and m[i] != c:
            i += 1
        return i
    if not s:
        return (lo, lo)
    lenm = len(m)
    if not hi:
        hi = lenm
    # binary search over newline-delimited "name\0<40-hex-sha1><flags>\n"
    # manifest lines; lo/hi are byte offsets, snapped to line starts
    while lo < hi:
        mid = (lo + hi) // 2
        start = mid
        # back up to the beginning of the line containing mid
        while start > 0 and m[start - 1] != '\n':
            start -= 1
        end = advance(start, '\0')
        if m[start:end] < s:
            # we know that after the null there are 40 bytes of sha1
            # this translates to the bisect lo = mid + 1
            lo = advance(end + 40, '\n') + 1
        else:
            # this translates to the bisect hi = mid
            hi = start
    end = advance(lo, '\0')
    found = m[lo:end]
    if s == found:
        # we know that after the null there are 40 bytes of sha1
        end = advance(end + 40, '\n')
        return (lo, end + 1)
    else:
        return (lo, lo)
667
667
def _checkforbidden(l):
    """Raise RevlogError if any filename contains a newline or CR."""
    for fname in l:
        if '\n' not in fname and '\r' not in fname:
            continue
        raise error.RevlogError(
            _("'\\n' and '\\r' disallowed in filenames: %r") % fname)
674
674
675
675
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply delta chunks *x* ([start, end, content] triples) to *addlist*.

    Returns (deltatext, newaddlist): the binary delta encoding and the
    full patched text as an array.array.
    """
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    currentposition = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        newaddlist += addlist[currentposition:start]
        if content:
            newaddlist += array.array('c', content)

        currentposition = end

    newaddlist += addlist[currentposition:]

    # each chunk is encoded as big-endian (start, end, len(content)) + content
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
696
696
def _splittopdir(f):
    """Split *f* into (topdir + '/', rest); topdir is '' for bare names."""
    if '/' not in f:
        return '', f
    top, rest = f.split('/', 1)
    return top + '/', rest
703
703
def _noop(s):
    """Do nothing (returns None).

    Shared sentinel installed as the default treemanifest._loadfunc and
    _copyfunc; callers compare against it with ``is`` to detect that no
    lazy load/copy is pending, so a single module-level object is required.
    PEP 8 (E731) prefers a def over a lambda assigned to a name.
    """
705
705
class treemanifest(object):
    def __init__(self, dir='', text=''):
        # directory this tree represents, with trailing '/'; '' for the root
        self._dir = dir
        self._node = revlog.nullid
        # lazy-load / lazy-copy hooks; _noop (compared by identity) means
        # nothing is pending
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        # subdirectory name (with trailing '/') -> treemanifest
        self._dirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing
723
723
    def _subpath(self, path):
        # full path of *path* relative to the repo root ('' dir at the root)
        return self._dir + path
726
726
727 def __len__(self):
727 def __len__(self):
728 self._load()
728 self._load()
729 size = len(self._files)
729 size = len(self._files)
730 for m in self._dirs.values():
730 for m in self._dirs.values():
731 size += m.__len__()
731 size += m.__len__()
732 return size
732 return size
733
733
734 def _isempty(self):
734 def _isempty(self):
735 self._load() # for consistency; already loaded by all callers
735 self._load() # for consistency; already loaded by all callers
736 return (not self._files and (not self._dirs or
736 return (not self._files and (not self._dirs or
737 all(m._isempty() for m in self._dirs.values())))
737 all(m._isempty() for m in self._dirs.values())))
738
738
739 def __repr__(self):
739 def __repr__(self):
740 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
740 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
741 (self._dir, revlog.hex(self._node),
741 (self._dir, revlog.hex(self._node),
742 bool(self._loadfunc is _noop),
742 bool(self._loadfunc is _noop),
743 self._dirty, id(self)))
743 self._dirty, id(self)))
744
744
    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        # e.g. 'foo/bar/' for a subtree
        return self._dir
749
749
    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        # a dirty tree has no valid node yet; it must be written first
        assert not self._dirty
        return self._node
756
756
    def setnode(self, node):
        # record the revlog node for this tree and mark it clean
        self._node = node
        self._dirty = False
760
760
761 def iterentries(self):
761 def iterentries(self):
762 self._load()
762 self._load()
763 for p, n in sorted(self._dirs.items() + self._files.items()):
763 for p, n in sorted(self._dirs.items() + self._files.items()):
764 if p in self._files:
764 if p in self._files:
765 yield self._subpath(p), n, self._flags.get(p, '')
765 yield self._subpath(p), n, self._flags.get(p, '')
766 else:
766 else:
767 for x in n.iterentries():
767 for x in n.iterentries():
768 yield x
768 yield x
769
769
770 def iteritems(self):
770 def iteritems(self):
771 self._load()
771 self._load()
772 for p, n in sorted(self._dirs.items() + self._files.items()):
772 for p, n in sorted(self._dirs.items() + self._files.items()):
773 if p in self._files:
773 if p in self._files:
774 yield self._subpath(p), n
774 yield self._subpath(p), n
775 else:
775 else:
776 for f, sn in n.iteritems():
776 for f, sn in n.iteritems():
777 yield f, sn
777 yield f, sn
778
778
779 def iterkeys(self):
779 def iterkeys(self):
780 self._load()
780 self._load()
781 for p in sorted(self._dirs.keys() + self._files.keys()):
781 for p in sorted(self._dirs.keys() + self._files.keys()):
782 if p in self._files:
782 if p in self._files:
783 yield self._subpath(p)
783 yield self._subpath(p)
784 else:
784 else:
785 for f in self._dirs[p].iterkeys():
785 for f in self._dirs[p].iterkeys():
786 yield f
786 yield f
787
787
    def keys(self):
        # materialized list of all file paths (sorted, recursive)
        return list(self.iterkeys())
790
790
    def __iter__(self):
        # iteration yields sorted file paths, same as iterkeys()
        return self.iterkeys()
793
793
794 def __contains__(self, f):
794 def __contains__(self, f):
795 if f is None:
795 if f is None:
796 return False
796 return False
797 self._load()
797 self._load()
798 dir, subpath = _splittopdir(f)
798 dir, subpath = _splittopdir(f)
799 if dir:
799 if dir:
800 if dir not in self._dirs:
800 if dir not in self._dirs:
801 return False
801 return False
802 return self._dirs[dir].__contains__(subpath)
802 return self._dirs[dir].__contains__(subpath)
803 else:
803 else:
804 return f in self._files
804 return f in self._files
805
805
806 def get(self, f, default=None):
806 def get(self, f, default=None):
807 self._load()
807 self._load()
808 dir, subpath = _splittopdir(f)
808 dir, subpath = _splittopdir(f)
809 if dir:
809 if dir:
810 if dir not in self._dirs:
810 if dir not in self._dirs:
811 return default
811 return default
812 return self._dirs[dir].get(subpath, default)
812 return self._dirs[dir].get(subpath, default)
813 else:
813 else:
814 return self._files.get(f, default)
814 return self._files.get(f, default)
815
815
816 def __getitem__(self, f):
816 def __getitem__(self, f):
817 self._load()
817 self._load()
818 dir, subpath = _splittopdir(f)
818 dir, subpath = _splittopdir(f)
819 if dir:
819 if dir:
820 return self._dirs[dir].__getitem__(subpath)
820 return self._dirs[dir].__getitem__(subpath)
821 else:
821 else:
822 return self._files[f]
822 return self._files[f]
823
823
824 def flags(self, f):
824 def flags(self, f):
825 self._load()
825 self._load()
826 dir, subpath = _splittopdir(f)
826 dir, subpath = _splittopdir(f)
827 if dir:
827 if dir:
828 if dir not in self._dirs:
828 if dir not in self._dirs:
829 return ''
829 return ''
830 return self._dirs[dir].flags(subpath)
830 return self._dirs[dir].flags(subpath)
831 else:
831 else:
832 if f in self._dirs:
832 if f in self._dirs:
833 return ''
833 return ''
834 return self._flags.get(f, '')
834 return self._flags.get(f, '')
835
835
836 def find(self, f):
836 def find(self, f):
837 self._load()
837 self._load()
838 dir, subpath = _splittopdir(f)
838 dir, subpath = _splittopdir(f)
839 if dir:
839 if dir:
840 return self._dirs[dir].find(subpath)
840 return self._dirs[dir].find(subpath)
841 else:
841 else:
842 return self._files[f], self._flags.get(f, '')
842 return self._files[f], self._flags.get(f, '')
843
843
844 def __delitem__(self, f):
844 def __delitem__(self, f):
845 self._load()
845 self._load()
846 dir, subpath = _splittopdir(f)
846 dir, subpath = _splittopdir(f)
847 if dir:
847 if dir:
848 self._dirs[dir].__delitem__(subpath)
848 self._dirs[dir].__delitem__(subpath)
849 # If the directory is now empty, remove it
849 # If the directory is now empty, remove it
850 if self._dirs[dir]._isempty():
850 if self._dirs[dir]._isempty():
851 del self._dirs[dir]
851 del self._dirs[dir]
852 else:
852 else:
853 del self._files[f]
853 del self._files[f]
854 if f in self._flags:
854 if f in self._flags:
855 del self._flags[f]
855 del self._flags[f]
856 self._dirty = True
856 self._dirty = True
857
857
858 def __setitem__(self, f, n):
858 def __setitem__(self, f, n):
859 assert n is not None
859 assert n is not None
860 self._load()
860 self._load()
861 dir, subpath = _splittopdir(f)
861 dir, subpath = _splittopdir(f)
862 if dir:
862 if dir:
863 if dir not in self._dirs:
863 if dir not in self._dirs:
864 self._dirs[dir] = treemanifest(self._subpath(dir))
864 self._dirs[dir] = treemanifest(self._subpath(dir))
865 self._dirs[dir].__setitem__(subpath, n)
865 self._dirs[dir].__setitem__(subpath, n)
866 else:
866 else:
867 self._files[f] = n[:21] # to match manifestdict's behavior
867 self._files[f] = n[:21] # to match manifestdict's behavior
868 self._dirty = True
868 self._dirty = True
869
869
    def _load(self):
        """Materialize this tree if a lazy load or lazy copy is pending."""
        if self._loadfunc is not _noop:
            # clear the hook before calling it so reentrant loads are no-ops
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)
877
877
878 def setflag(self, f, flags):
878 def setflag(self, f, flags):
879 """Set the flags (symlink, executable) for path f."""
879 """Set the flags (symlink, executable) for path f."""
880 self._load()
880 self._load()
881 dir, subpath = _splittopdir(f)
881 dir, subpath = _splittopdir(f)
882 if dir:
882 if dir:
883 if dir not in self._dirs:
883 if dir not in self._dirs:
884 self._dirs[dir] = treemanifest(self._subpath(dir))
884 self._dirs[dir] = treemanifest(self._subpath(dir))
885 self._dirs[dir].setflag(subpath, flags)
885 self._dirs[dir].setflag(subpath, flags)
886 else:
886 else:
887 self._flags[f] = flags
887 self._flags[f] = flags
888 self._dirty = True
888 self._dirty = True
889
889
    def copy(self):
        """Return a copy of this tree.

        The copy is lazy when possible: rather than duplicating loaded
        contents immediately, a _copyfunc hook is installed on the copy that
        materializes them on first access.
        """
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                for d in self._dirs:
                    s._dirs[d] = self._dirs[d].copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # already loaded: populate the copy eagerly
                _copyfunc(copy)
            else:
                # not loaded yet: defer copying until first access
                copy._copyfunc = _copyfunc
        else:
            # this tree is itself an unmaterialized copy; chain the same hook
            copy._copyfunc = self._copyfunc
        return copy
908
908
    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set()
        def _filesnotin(t1, t2):
            # identical, clean subtrees cannot differ; prune the recursion
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    # whole subtree is absent from t2
                    files.update(m1.iterkeys())

            for fn in t1._files.iterkeys():
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files
930
930
    @propertycache
    def _alldirs(self):
        # lazily computed (and cached) set of every directory in this manifest
        return util.dirs(self)
934
934
    def dirs(self):
        # util.dirs object enumerating every directory in this manifest
        return self._alldirs
937
937
938 def hasdir(self, dir):
938 def hasdir(self, dir):
939 self._load()
939 self._load()
940 topdir, subdir = _splittopdir(dir)
940 topdir, subdir = _splittopdir(dir)
941 if topdir:
941 if topdir:
942 if topdir in self._dirs:
942 if topdir in self._dirs:
943 return self._dirs[topdir].hasdir(subdir)
943 return self._dirs[topdir].hasdir(subdir)
944 return False
944 return False
945 return (dir + '/') in self._dirs
945 return (dir + '/') in self._dirs
946
946
    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            # no filtering requested: emit every file
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # whatever remains in fset was explicitly requested but never seen;
        # report it as bad unless it names an existing directory
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)
975
975
976 def _walk(self, match):
976 def _walk(self, match):
977 '''Recursively generates matching file names for walk().'''
977 '''Recursively generates matching file names for walk().'''
978 if not match.visitdir(self._dir[:-1] or '.'):
978 if not match.visitdir(self._dir[:-1] or '.'):
979 return
979 return
980
980
981 # yield this dir's files and walk its submanifests
981 # yield this dir's files and walk its submanifests
982 self._load()
982 self._load()
983 for p in sorted(self._dirs.keys() + self._files.keys()):
983 for p in sorted(self._dirs.keys() + self._files.keys()):
984 if p in self._files:
984 if p in self._files:
985 fullp = self._subpath(p)
985 fullp = self._subpath(p)
986 if match(fullp):
986 if match(fullp):
987 yield fullp
987 yield fullp
988 else:
988 else:
989 for f in self._dirs[p]._walk(match):
989 for f in self._dirs[p]._walk(match):
990 yield f
990 yield f
991
991
    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            # no filtering needed; a (lazy) copy suffices
            return self.copy()

        return self._matches(match)
998
998
    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            # matcher guarantees everything below here matches
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            # nothing below here can match; return the empty tree
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        # keep only non-empty filtered subtrees
        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret
1027
1027
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # identical, clean subtrees cannot differ; prune the recursion
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # directories in t1, paired with t2's counterpart (or empty)
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            # directories only present in t2
            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # files only present in t2
            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
1074
1074
1075 def unmodifiedsince(self, m2):
1075 def unmodifiedsince(self, m2):
1076 return not self._dirty and not m2._dirty and self._node == m2._node
1076 return not self._dirty and not m2._dirty and self._node == m2._node
1077
1077
    def parse(self, text, readsubtree):
        """Populate this tree from manifest *text*.

        ``readsubtree(path, node)`` is called to construct subtrees for
        entries carrying the 't' (tree) flag.  Flat-manifest entries whose
        path contains '/' are routed through __setitem__/setflag so they
        land in the right subdirectory.
        """
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl
1097
1097
1098 def text(self, usemanifestv2=False):
1098 def text(self, usemanifestv2=False):
1099 """Get the full data of this manifest as a bytestring."""
1099 """Get the full data of this manifest as a bytestring."""
1100 self._load()
1100 self._load()
1101 return _text(self.iterentries(), usemanifestv2)
1101 return _text(self.iterentries(), usemanifestv2)
1102
1102
1103 def dirtext(self, usemanifestv2=False):
1103 def dirtext(self, usemanifestv2=False):
1104 """Get the full data of this directory as a bytestring. Make sure that
1104 """Get the full data of this directory as a bytestring. Make sure that
1105 any submanifests have been written first, so their nodeids are correct.
1105 any submanifests have been written first, so their nodeids are correct.
1106 """
1106 """
1107 self._load()
1107 self._load()
1108 flags = self.flags
1108 flags = self.flags
1109 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1109 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
1110 files = [(f, self._files[f], flags(f)) for f in self._files]
1110 files = [(f, self._files[f], flags(f)) for f in self._files]
1111 return _text(sorted(dirs + files), usemanifestv2)
1111 return _text(sorted(dirs + files), usemanifestv2)
1112
1112
1113 def read(self, gettext, readsubtree):
1113 def read(self, gettext, readsubtree):
1114 def _load_for_read(s):
1114 def _load_for_read(s):
1115 s.parse(gettext(), readsubtree)
1115 s.parse(gettext(), readsubtree)
1116 s._dirty = False
1116 s._dirty = False
1117 self._loadfunc = _load_for_read
1117 self._loadfunc = _load_for_read
1118
1118
1119 def writesubtrees(self, m1, m2, writesubtree):
1119 def writesubtrees(self, m1, m2, writesubtree):
1120 self._load() # for consistency; should never have any effect here
1120 self._load() # for consistency; should never have any effect here
1121 m1._load()
1121 m1._load()
1122 m2._load()
1122 m2._load()
1123 emptytree = treemanifest()
1123 emptytree = treemanifest()
1124 for d, subm in self._dirs.iteritems():
1124 for d, subm in self._dirs.iteritems():
1125 subp1 = m1._dirs.get(d, emptytree)._node
1125 subp1 = m1._dirs.get(d, emptytree)._node
1126 subp2 = m2._dirs.get(d, emptytree)._node
1126 subp2 = m2._dirs.get(d, emptytree)._node
1127 if subp1 == revlog.nullid:
1127 if subp1 == revlog.nullid:
1128 subp1, subp2 = subp2, subp1
1128 subp1, subp2 = subp2, subp1
1129 writesubtree(subm, subp1, subp2)
1129 writesubtree(subm, subp1, subp2)
1130
1130
1131 class manifestrevlog(revlog.revlog):
1131 class manifestrevlog(revlog.revlog):
1132 '''A revlog that stores manifest texts. This is responsible for caching the
1132 '''A revlog that stores manifest texts. This is responsible for caching the
1133 full-text manifest contents.
1133 full-text manifest contents.
1134 '''
1134 '''
1135 def __init__(self, opener, dir='', dirlogcache=None):
1135 def __init__(self, opener, dir='', dirlogcache=None):
1136 # During normal operations, we expect to deal with not more than four
1136 # During normal operations, we expect to deal with not more than four
1137 # revs at a time (such as during commit --amend). When rebasing large
1137 # revs at a time (such as during commit --amend). When rebasing large
1138 # stacks of commits, the number can go up, hence the config knob below.
1138 # stacks of commits, the number can go up, hence the config knob below.
1139 cachesize = 4
1139 cachesize = 4
1140 usetreemanifest = False
1140 usetreemanifest = False
1141 usemanifestv2 = False
1141 usemanifestv2 = False
1142 opts = getattr(opener, 'options', None)
1142 opts = getattr(opener, 'options', None)
1143 if opts is not None:
1143 if opts is not None:
1144 cachesize = opts.get('manifestcachesize', cachesize)
1144 cachesize = opts.get('manifestcachesize', cachesize)
1145 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1145 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1146 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
1146 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
1147
1147
1148 self._treeondisk = usetreemanifest
1148 self._treeondisk = usetreemanifest
1149 self._usemanifestv2 = usemanifestv2
1149 self._usemanifestv2 = usemanifestv2
1150
1150
1151 self._fulltextcache = util.lrucachedict(cachesize)
1151 self._fulltextcache = util.lrucachedict(cachesize)
1152
1152
1153 indexfile = "00manifest.i"
1153 indexfile = "00manifest.i"
1154 if dir:
1154 if dir:
1155 assert self._treeondisk, 'opts is %r' % opts
1155 assert self._treeondisk, 'opts is %r' % opts
1156 if not dir.endswith('/'):
1156 if not dir.endswith('/'):
1157 dir = dir + '/'
1157 dir = dir + '/'
1158 indexfile = "meta/" + dir + "00manifest.i"
1158 indexfile = "meta/" + dir + "00manifest.i"
1159 self._dir = dir
1159 self._dir = dir
1160 # The dirlogcache is kept on the root manifest log
1160 # The dirlogcache is kept on the root manifest log
1161 if dir:
1161 if dir:
1162 self._dirlogcache = dirlogcache
1162 self._dirlogcache = dirlogcache
1163 else:
1163 else:
1164 self._dirlogcache = {'': self}
1164 self._dirlogcache = {'': self}
1165
1165
1166 super(manifestrevlog, self).__init__(opener, indexfile,
1166 super(manifestrevlog, self).__init__(opener, indexfile,
1167 checkambig=bool(dir))
1167 checkambig=bool(dir))
1168
1168
1169 @property
1169 @property
1170 def fulltextcache(self):
1170 def fulltextcache(self):
1171 return self._fulltextcache
1171 return self._fulltextcache
1172
1172
1173 def clearcaches(self):
1173 def clearcaches(self):
1174 super(manifestrevlog, self).clearcaches()
1174 super(manifestrevlog, self).clearcaches()
1175 self._fulltextcache.clear()
1175 self._fulltextcache.clear()
1176 self._dirlogcache = {'': self}
1176 self._dirlogcache = {'': self}
1177
1177
1178 def dirlog(self, dir):
1178 def dirlog(self, dir):
1179 if dir:
1179 if dir:
1180 assert self._treeondisk
1180 assert self._treeondisk
1181 if dir not in self._dirlogcache:
1181 if dir not in self._dirlogcache:
1182 self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
1182 self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
1183 self._dirlogcache)
1183 self._dirlogcache)
1184 return self._dirlogcache[dir]
1184 return self._dirlogcache[dir]
1185
1185
1186 def add(self, m, transaction, link, p1, p2, added, removed):
1186 def add(self, m, transaction, link, p1, p2, added, removed):
1187 if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
1187 if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
1188 and not self._usemanifestv2):
1188 and not self._usemanifestv2):
1189 # If our first parent is in the manifest cache, we can
1189 # If our first parent is in the manifest cache, we can
1190 # compute a delta here using properties we know about the
1190 # compute a delta here using properties we know about the
1191 # manifest up-front, which may save time later for the
1191 # manifest up-front, which may save time later for the
1192 # revlog layer.
1192 # revlog layer.
1193
1193
1194 _checkforbidden(added)
1194 _checkforbidden(added)
1195 # combine the changed lists into one sorted iterator
1195 # combine the changed lists into one sorted iterator
1196 work = heapq.merge([(x, False) for x in added],
1196 work = heapq.merge([(x, False) for x in added],
1197 [(x, True) for x in removed])
1197 [(x, True) for x in removed])
1198
1198
1199 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1199 arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
1200 cachedelta = self.rev(p1), deltatext
1200 cachedelta = self.rev(p1), deltatext
1201 text = util.buffer(arraytext)
1201 text = util.buffer(arraytext)
1202 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1202 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
1203 else:
1203 else:
1204 # The first parent manifest isn't already loaded, so we'll
1204 # The first parent manifest isn't already loaded, so we'll
1205 # just encode a fulltext of the manifest and pass that
1205 # just encode a fulltext of the manifest and pass that
1206 # through to the revlog layer, and let it handle the delta
1206 # through to the revlog layer, and let it handle the delta
1207 # process.
1207 # process.
1208 if self._treeondisk:
1208 if self._treeondisk:
1209 m1 = self.read(p1)
1209 m1 = self.read(p1)
1210 m2 = self.read(p2)
1210 m2 = self.read(p2)
1211 n = self._addtree(m, transaction, link, m1, m2)
1211 n = self._addtree(m, transaction, link, m1, m2)
1212 arraytext = None
1212 arraytext = None
1213 else:
1213 else:
1214 text = m.text(self._usemanifestv2)
1214 text = m.text(self._usemanifestv2)
1215 n = self.addrevision(text, transaction, link, p1, p2)
1215 n = self.addrevision(text, transaction, link, p1, p2)
1216 arraytext = array.array('c', text)
1216 arraytext = array.array('c', text)
1217
1217
1218 if arraytext is not None:
1218 if arraytext is not None:
1219 self.fulltextcache[n] = arraytext
1219 self.fulltextcache[n] = arraytext
1220
1220
1221 return n
1221 return n
1222
1222
1223 def _addtree(self, m, transaction, link, m1, m2):
1223 def _addtree(self, m, transaction, link, m1, m2):
1224 # If the manifest is unchanged compared to one parent,
1224 # If the manifest is unchanged compared to one parent,
1225 # don't write a new revision
1225 # don't write a new revision
1226 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1226 if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
1227 return m.node()
1227 return m.node()
1228 def writesubtree(subm, subp1, subp2):
1228 def writesubtree(subm, subp1, subp2):
1229 sublog = self.dirlog(subm.dir())
1229 sublog = self.dirlog(subm.dir())
1230 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1230 sublog.add(subm, transaction, link, subp1, subp2, None, None)
1231 m.writesubtrees(m1, m2, writesubtree)
1231 m.writesubtrees(m1, m2, writesubtree)
1232 text = m.dirtext(self._usemanifestv2)
1232 text = m.dirtext(self._usemanifestv2)
1233 # Double-check whether contents are unchanged to one parent
1233 # Double-check whether contents are unchanged to one parent
1234 if text == m1.dirtext(self._usemanifestv2):
1234 if text == m1.dirtext(self._usemanifestv2):
1235 n = m1.node()
1235 n = m1.node()
1236 elif text == m2.dirtext(self._usemanifestv2):
1236 elif text == m2.dirtext(self._usemanifestv2):
1237 n = m2.node()
1237 n = m2.node()
1238 else:
1238 else:
1239 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1239 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
1240 # Save nodeid so parent manifest can calculate its nodeid
1240 # Save nodeid so parent manifest can calculate its nodeid
1241 m.setnode(n)
1241 m.setnode(n)
1242 return n
1242 return n
1243
1243
1244 class manifestlog(object):
1244 class manifestlog(object):
1245 """A collection class representing the collection of manifest snapshots
1245 """A collection class representing the collection of manifest snapshots
1246 referenced by commits in the repository.
1246 referenced by commits in the repository.
1247
1247
1248 In this situation, 'manifest' refers to the abstract concept of a snapshot
1248 In this situation, 'manifest' refers to the abstract concept of a snapshot
1249 of the list of files in the given commit. Consumers of the output of this
1249 of the list of files in the given commit. Consumers of the output of this
1250 class do not care about the implementation details of the actual manifests
1250 class do not care about the implementation details of the actual manifests
1251 they receive (i.e. tree or flat or lazily loaded, etc)."""
1251 they receive (i.e. tree or flat or lazily loaded, etc)."""
1252 def __init__(self, opener, repo):
1252 def __init__(self, opener, repo):
1253 self._repo = repo
1253 self._repo = repo
1254
1254
1255 usetreemanifest = False
1255 usetreemanifest = False
1256
1256
1257 opts = getattr(opener, 'options', None)
1257 opts = getattr(opener, 'options', None)
1258 if opts is not None:
1258 if opts is not None:
1259 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1259 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1260 self._treeinmem = usetreemanifest
1260 self._treeinmem = usetreemanifest
1261
1261
1262 self._oldmanifest = repo._constructmanifest()
1262 self._oldmanifest = repo._constructmanifest()
1263 self._revlog = self._oldmanifest
1263 self._revlog = self._oldmanifest
1264
1264
1265 # A cache of the manifestctx or treemanifestctx for each directory
1265 # A cache of the manifestctx or treemanifestctx for each directory
1266 self._dirmancache = {}
1266 self._dirmancache = {}
1267
1267
1268 # We'll separate this into it's own cache once oldmanifest is no longer
1268 # We'll separate this into it's own cache once oldmanifest is no longer
1269 # used
1269 # used
1270 self._mancache = self._oldmanifest._mancache
1270 self._mancache = self._oldmanifest._mancache
1271 self._dirmancache[''] = self._mancache
1271 self._dirmancache[''] = self._mancache
1272
1272
1273 # A future patch makes this use the same config value as the existing
1273 # A future patch makes this use the same config value as the existing
1274 # mancache
1274 # mancache
1275 self.cachesize = 4
1275 self.cachesize = 4
1276
1276
1277 def __getitem__(self, node):
1277 def __getitem__(self, node):
1278 """Retrieves the manifest instance for the given node. Throws a
1278 """Retrieves the manifest instance for the given node. Throws a
1279 LookupError if not found.
1279 LookupError if not found.
1280 """
1280 """
1281 return self.get('', node)
1281 return self.get('', node)
1282
1282
1283 def get(self, dir, node):
1283 def get(self, dir, node):
1284 """Retrieves the manifest instance for the given node. Throws a
1284 """Retrieves the manifest instance for the given node. Throws a
1285 LookupError if not found.
1285 LookupError if not found.
1286 """
1286 """
1287 if node in self._dirmancache.get(dir, ()):
1287 if node in self._dirmancache.get(dir, ()):
1288 cachemf = self._dirmancache[dir][node]
1288 cachemf = self._dirmancache[dir][node]
1289 # The old manifest may put non-ctx manifests in the cache, so
1289 # The old manifest may put non-ctx manifests in the cache, so
1290 # skip those since they don't implement the full api.
1290 # skip those since they don't implement the full api.
1291 if (isinstance(cachemf, manifestctx) or
1291 if (isinstance(cachemf, manifestctx) or
1292 isinstance(cachemf, treemanifestctx)):
1292 isinstance(cachemf, treemanifestctx)):
1293 return cachemf
1293 return cachemf
1294
1294
1295 if dir:
1295 if dir:
1296 if self._revlog._treeondisk:
1296 if self._revlog._treeondisk:
1297 dirlog = self._revlog.dirlog(dir)
1297 dirlog = self._revlog.dirlog(dir)
1298 if node not in dirlog.nodemap:
1298 if node not in dirlog.nodemap:
1299 raise LookupError(node, dirlog.indexfile,
1299 raise LookupError(node, dirlog.indexfile,
1300 _('no node'))
1300 _('no node'))
1301 m = treemanifestctx(self._repo, dir, node)
1301 m = treemanifestctx(self._repo, dir, node)
1302 else:
1302 else:
1303 raise error.Abort(
1303 raise error.Abort(
1304 _("cannot ask for manifest directory '%s' in a flat "
1304 _("cannot ask for manifest directory '%s' in a flat "
1305 "manifest") % dir)
1305 "manifest") % dir)
1306 else:
1306 else:
1307 if node not in self._revlog.nodemap:
1307 if node not in self._revlog.nodemap:
1308 raise LookupError(node, self._revlog.indexfile,
1308 raise LookupError(node, self._revlog.indexfile,
1309 _('no node'))
1309 _('no node'))
1310 if self._treeinmem:
1310 if self._treeinmem:
1311 m = treemanifestctx(self._repo, '', node)
1311 m = treemanifestctx(self._repo, '', node)
1312 else:
1312 else:
1313 m = manifestctx(self._repo, node)
1313 m = manifestctx(self._repo, node)
1314
1314
1315 if node != revlog.nullid:
1315 if node != revlog.nullid:
1316 mancache = self._dirmancache.get(dir)
1316 mancache = self._dirmancache.get(dir)
1317 if not mancache:
1317 if not mancache:
1318 mancache = util.lrucachedict(self.cachesize)
1318 mancache = util.lrucachedict(self.cachesize)
1319 self._dirmancache[dir] = mancache
1319 self._dirmancache[dir] = mancache
1320 mancache[node] = m
1320 mancache[node] = m
1321 return m
1321 return m
1322
1322
1323 def add(self, m, transaction, link, p1, p2, added, removed):
1323 def add(self, m, transaction, link, p1, p2, added, removed):
1324 return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1324 return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1325
1325
1326 class manifestctx(object):
1326 class manifestctx(object):
1327 """A class representing a single revision of a manifest, including its
1327 """A class representing a single revision of a manifest, including its
1328 contents, its parent revs, and its linkrev.
1328 contents, its parent revs, and its linkrev.
1329 """
1329 """
1330 def __init__(self, repo, node):
1330 def __init__(self, repo, node):
1331 self._repo = repo
1331 self._repo = repo
1332 self._data = None
1332 self._data = None
1333
1333
1334 self._node = node
1334 self._node = node
1335
1335
1336 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1336 # TODO: We eventually want p1, p2, and linkrev exposed on this class,
1337 # but let's add it later when something needs it and we can load it
1337 # but let's add it later when something needs it and we can load it
1338 # lazily.
1338 # lazily.
1339 #self.p1, self.p2 = revlog.parents(node)
1339 #self.p1, self.p2 = revlog.parents(node)
1340 #rev = revlog.rev(node)
1340 #rev = revlog.rev(node)
1341 #self.linkrev = revlog.linkrev(rev)
1341 #self.linkrev = revlog.linkrev(rev)
1342
1342
1343 def node(self):
1343 def node(self):
1344 return self._node
1344 return self._node
1345
1345
1346 def read(self):
1346 def read(self):
1347 if not self._data:
1347 if not self._data:
1348 if self._node == revlog.nullid:
1348 if self._node == revlog.nullid:
1349 self._data = manifestdict()
1349 self._data = manifestdict()
1350 else:
1350 else:
1351 rl = self._repo.manifestlog._revlog
1351 rl = self._repo.manifestlog._revlog
1352 text = rl.revision(self._node)
1352 text = rl.revision(self._node)
1353 arraytext = array.array('c', text)
1353 arraytext = array.array('c', text)
1354 rl._fulltextcache[self._node] = arraytext
1354 rl._fulltextcache[self._node] = arraytext
1355 self._data = manifestdict(text)
1355 self._data = manifestdict(text)
1356 return self._data
1356 return self._data
1357
1357
1358 def readfast(self, shallow=False):
1358 def readfast(self, shallow=False):
1359 '''Calls either readdelta or read, based on which would be less work.
1359 '''Calls either readdelta or read, based on which would be less work.
1360 readdelta is called if the delta is against the p1, and therefore can be
1360 readdelta is called if the delta is against the p1, and therefore can be
1361 read quickly.
1361 read quickly.
1362
1362
1363 If `shallow` is True, nothing changes since this is a flat manifest.
1363 If `shallow` is True, nothing changes since this is a flat manifest.
1364 '''
1364 '''
1365 rl = self._repo.manifestlog._revlog
1365 rl = self._repo.manifestlog._revlog
1366 r = rl.rev(self._node)
1366 r = rl.rev(self._node)
1367 deltaparent = rl.deltaparent(r)
1367 deltaparent = rl.deltaparent(r)
1368 if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
1368 if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
1369 return self.readdelta()
1369 return self.readdelta()
1370 return self.read()
1370 return self.read()
1371
1371
1372 def readdelta(self, shallow=False):
1372 def readdelta(self, shallow=False):
1373 '''Returns a manifest containing just the entries that are present
1373 '''Returns a manifest containing just the entries that are present
1374 in this manifest, but not in its p1 manifest. This is efficient to read
1374 in this manifest, but not in its p1 manifest. This is efficient to read
1375 if the revlog delta is already p1.
1375 if the revlog delta is already p1.
1376
1376
1377 Changing the value of `shallow` has no effect on flat manifests.
1377 Changing the value of `shallow` has no effect on flat manifests.
1378 '''
1378 '''
1379 revlog = self._repo.manifestlog._revlog
1379 revlog = self._repo.manifestlog._revlog
1380 if revlog._usemanifestv2:
1380 if revlog._usemanifestv2:
1381 # Need to perform a slow delta
1381 # Need to perform a slow delta
1382 r0 = revlog.deltaparent(revlog.rev(self._node))
1382 r0 = revlog.deltaparent(revlog.rev(self._node))
1383 m0 = manifestctx(self._repo, revlog.node(r0)).read()
1383 m0 = manifestctx(self._repo, revlog.node(r0)).read()
1384 m1 = self.read()
1384 m1 = self.read()
1385 md = manifestdict()
1385 md = manifestdict()
1386 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1386 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1387 if n1:
1387 if n1:
1388 md[f] = n1
1388 md[f] = n1
1389 if fl1:
1389 if fl1:
1390 md.setflag(f, fl1)
1390 md.setflag(f, fl1)
1391 return md
1391 return md
1392
1392
1393 r = revlog.rev(self._node)
1393 r = revlog.rev(self._node)
1394 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1394 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1395 return manifestdict(d)
1395 return manifestdict(d)
1396
1396
1397 def find(self, key):
1398 return self.read().find(key)
1399
1397 class treemanifestctx(object):
1400 class treemanifestctx(object):
1398 def __init__(self, repo, dir, node):
1401 def __init__(self, repo, dir, node):
1399 self._repo = repo
1402 self._repo = repo
1400 self._dir = dir
1403 self._dir = dir
1401 self._data = None
1404 self._data = None
1402
1405
1403 self._node = node
1406 self._node = node
1404
1407
1405 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1408 # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
1406 # we can instantiate treemanifestctx objects for directories we don't
1409 # we can instantiate treemanifestctx objects for directories we don't
1407 # have on disk.
1410 # have on disk.
1408 #self.p1, self.p2 = revlog.parents(node)
1411 #self.p1, self.p2 = revlog.parents(node)
1409 #rev = revlog.rev(node)
1412 #rev = revlog.rev(node)
1410 #self.linkrev = revlog.linkrev(rev)
1413 #self.linkrev = revlog.linkrev(rev)
1411
1414
1412 def _revlog(self):
1415 def _revlog(self):
1413 return self._repo.manifestlog._revlog.dirlog(self._dir)
1416 return self._repo.manifestlog._revlog.dirlog(self._dir)
1414
1417
1415 def read(self):
1418 def read(self):
1416 if not self._data:
1419 if not self._data:
1417 rl = self._revlog()
1420 rl = self._revlog()
1418 if self._node == revlog.nullid:
1421 if self._node == revlog.nullid:
1419 self._data = treemanifest()
1422 self._data = treemanifest()
1420 elif rl._treeondisk:
1423 elif rl._treeondisk:
1421 m = treemanifest(dir=self._dir)
1424 m = treemanifest(dir=self._dir)
1422 def gettext():
1425 def gettext():
1423 return rl.revision(self._node)
1426 return rl.revision(self._node)
1424 def readsubtree(dir, subm):
1427 def readsubtree(dir, subm):
1425 return treemanifestctx(self._repo, dir, subm).read()
1428 return treemanifestctx(self._repo, dir, subm).read()
1426 m.read(gettext, readsubtree)
1429 m.read(gettext, readsubtree)
1427 m.setnode(self._node)
1430 m.setnode(self._node)
1428 self._data = m
1431 self._data = m
1429 else:
1432 else:
1430 text = rl.revision(self._node)
1433 text = rl.revision(self._node)
1431 arraytext = array.array('c', text)
1434 arraytext = array.array('c', text)
1432 rl.fulltextcache[self._node] = arraytext
1435 rl.fulltextcache[self._node] = arraytext
1433 self._data = treemanifest(dir=self._dir, text=text)
1436 self._data = treemanifest(dir=self._dir, text=text)
1434
1437
1435 return self._data
1438 return self._data
1436
1439
1437 def node(self):
1440 def node(self):
1438 return self._node
1441 return self._node
1439
1442
1440 def readdelta(self, shallow=False):
1443 def readdelta(self, shallow=False):
1441 '''Returns a manifest containing just the entries that are present
1444 '''Returns a manifest containing just the entries that are present
1442 in this manifest, but not in its p1 manifest. This is efficient to read
1445 in this manifest, but not in its p1 manifest. This is efficient to read
1443 if the revlog delta is already p1.
1446 if the revlog delta is already p1.
1444
1447
1445 If `shallow` is True, this will read the delta for this directory,
1448 If `shallow` is True, this will read the delta for this directory,
1446 without recursively reading subdirectory manifests. Instead, any
1449 without recursively reading subdirectory manifests. Instead, any
1447 subdirectory entry will be reported as it appears in the manifest, i.e.
1450 subdirectory entry will be reported as it appears in the manifest, i.e.
1448 the subdirectory will be reported among files and distinguished only by
1451 the subdirectory will be reported among files and distinguished only by
1449 its 't' flag.
1452 its 't' flag.
1450 '''
1453 '''
1451 revlog = self._revlog()
1454 revlog = self._revlog()
1452 if shallow and not revlog._usemanifestv2:
1455 if shallow and not revlog._usemanifestv2:
1453 r = revlog.rev(self._node)
1456 r = revlog.rev(self._node)
1454 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1457 d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
1455 return manifestdict(d)
1458 return manifestdict(d)
1456 else:
1459 else:
1457 # Need to perform a slow delta
1460 # Need to perform a slow delta
1458 r0 = revlog.deltaparent(revlog.rev(self._node))
1461 r0 = revlog.deltaparent(revlog.rev(self._node))
1459 m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
1462 m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
1460 m1 = self.read()
1463 m1 = self.read()
1461 md = treemanifest(dir=self._dir)
1464 md = treemanifest(dir=self._dir)
1462 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1465 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
1463 if n1:
1466 if n1:
1464 md[f] = n1
1467 md[f] = n1
1465 if fl1:
1468 if fl1:
1466 md.setflag(f, fl1)
1469 md.setflag(f, fl1)
1467 return md
1470 return md
1468
1471
1469 def readfast(self, shallow=False):
1472 def readfast(self, shallow=False):
1470 '''Calls either readdelta or read, based on which would be less work.
1473 '''Calls either readdelta or read, based on which would be less work.
1471 readdelta is called if the delta is against the p1, and therefore can be
1474 readdelta is called if the delta is against the p1, and therefore can be
1472 read quickly.
1475 read quickly.
1473
1476
1474 If `shallow` is True, it only returns the entries from this manifest,
1477 If `shallow` is True, it only returns the entries from this manifest,
1475 and not any submanifests.
1478 and not any submanifests.
1476 '''
1479 '''
1477 rl = self._revlog()
1480 rl = self._revlog()
1478 r = rl.rev(self._node)
1481 r = rl.rev(self._node)
1479 deltaparent = rl.deltaparent(r)
1482 deltaparent = rl.deltaparent(r)
1480 if (deltaparent != revlog.nullrev and
1483 if (deltaparent != revlog.nullrev and
1481 deltaparent in rl.parentrevs(r)):
1484 deltaparent in rl.parentrevs(r)):
1482 return self.readdelta(shallow=shallow)
1485 return self.readdelta(shallow=shallow)
1483
1486
1484 if shallow:
1487 if shallow:
1485 return manifestdict(rl.revision(self._node))
1488 return manifestdict(rl.revision(self._node))
1486 else:
1489 else:
1487 return self.read()
1490 return self.read()
1488
1491
1492 def find(self, key):
1493 return self.read().find(key)
1494
1489 class manifest(manifestrevlog):
1495 class manifest(manifestrevlog):
1490 def __init__(self, opener, dir='', dirlogcache=None):
1496 def __init__(self, opener, dir='', dirlogcache=None):
1491 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1497 '''The 'dir' and 'dirlogcache' arguments are for internal use by
1492 manifest.manifest only. External users should create a root manifest
1498 manifest.manifest only. External users should create a root manifest
1493 log with manifest.manifest(opener) and call dirlog() on it.
1499 log with manifest.manifest(opener) and call dirlog() on it.
1494 '''
1500 '''
1495 # During normal operations, we expect to deal with not more than four
1501 # During normal operations, we expect to deal with not more than four
1496 # revs at a time (such as during commit --amend). When rebasing large
1502 # revs at a time (such as during commit --amend). When rebasing large
1497 # stacks of commits, the number can go up, hence the config knob below.
1503 # stacks of commits, the number can go up, hence the config knob below.
1498 cachesize = 4
1504 cachesize = 4
1499 usetreemanifest = False
1505 usetreemanifest = False
1500 opts = getattr(opener, 'options', None)
1506 opts = getattr(opener, 'options', None)
1501 if opts is not None:
1507 if opts is not None:
1502 cachesize = opts.get('manifestcachesize', cachesize)
1508 cachesize = opts.get('manifestcachesize', cachesize)
1503 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1509 usetreemanifest = opts.get('treemanifest', usetreemanifest)
1504 self._mancache = util.lrucachedict(cachesize)
1510 self._mancache = util.lrucachedict(cachesize)
1505 self._treeinmem = usetreemanifest
1511 self._treeinmem = usetreemanifest
1506 super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)
1512 super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)
1507
1513
1508 def _newmanifest(self, data=''):
1514 def _newmanifest(self, data=''):
1509 if self._treeinmem:
1515 if self._treeinmem:
1510 return treemanifest(self._dir, data)
1516 return treemanifest(self._dir, data)
1511 return manifestdict(data)
1517 return manifestdict(data)
1512
1518
1513 def dirlog(self, dir):
1519 def dirlog(self, dir):
1514 """This overrides the base revlog implementation to allow construction
1520 """This overrides the base revlog implementation to allow construction
1515 'manifest' types instead of manifestrevlog types. This is only needed
1521 'manifest' types instead of manifestrevlog types. This is only needed
1516 until we migrate off the 'manifest' type."""
1522 until we migrate off the 'manifest' type."""
1517 if dir:
1523 if dir:
1518 assert self._treeondisk
1524 assert self._treeondisk
1519 if dir not in self._dirlogcache:
1525 if dir not in self._dirlogcache:
1520 self._dirlogcache[dir] = manifest(self.opener, dir,
1526 self._dirlogcache[dir] = manifest(self.opener, dir,
1521 self._dirlogcache)
1527 self._dirlogcache)
1522 return self._dirlogcache[dir]
1528 return self._dirlogcache[dir]
1523
1529
1524 def read(self, node):
1530 def read(self, node):
1525 if node == revlog.nullid:
1531 if node == revlog.nullid:
1526 return self._newmanifest() # don't upset local cache
1532 return self._newmanifest() # don't upset local cache
1527 if node in self._mancache:
1533 if node in self._mancache:
1528 cached = self._mancache[node]
1534 cached = self._mancache[node]
1529 if (isinstance(cached, manifestctx) or
1535 if (isinstance(cached, manifestctx) or
1530 isinstance(cached, treemanifestctx)):
1536 isinstance(cached, treemanifestctx)):
1531 cached = cached.read()
1537 cached = cached.read()
1532 return cached
1538 return cached
1533 if self._treeondisk:
1539 if self._treeondisk:
1534 def gettext():
1540 def gettext():
1535 return self.revision(node)
1541 return self.revision(node)
1536 def readsubtree(dir, subm):
1542 def readsubtree(dir, subm):
1537 return self.dirlog(dir).read(subm)
1543 return self.dirlog(dir).read(subm)
1538 m = self._newmanifest()
1544 m = self._newmanifest()
1539 m.read(gettext, readsubtree)
1545 m.read(gettext, readsubtree)
1540 m.setnode(node)
1546 m.setnode(node)
1541 arraytext = None
1547 arraytext = None
1542 else:
1548 else:
1543 text = self.revision(node)
1549 text = self.revision(node)
1544 m = self._newmanifest(text)
1550 m = self._newmanifest(text)
1545 arraytext = array.array('c', text)
1551 arraytext = array.array('c', text)
1546 self._mancache[node] = m
1552 self._mancache[node] = m
1547 if arraytext is not None:
1553 if arraytext is not None:
1548 self.fulltextcache[node] = arraytext
1554 self.fulltextcache[node] = arraytext
1549 return m
1555 return m
1550
1556
1551 def find(self, node, f):
1552 '''look up entry for a single file efficiently.
1553 return (node, flags) pair if found, (None, None) if not.'''
1554 m = self.read(node)
1555 try:
1556 return m.find(f)
1557 except KeyError:
1558 return None, None
1559
1560 def clearcaches(self):
1557 def clearcaches(self):
1561 super(manifest, self).clearcaches()
1558 super(manifest, self).clearcaches()
1562 self._mancache.clear()
1559 self._mancache.clear()
General Comments 0
You need to be logged in to leave comments. Login now