revert: add an experimental config to use inverted selection...
Laurent Charignon
r25424:69609f43 default
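The changeset summary says it adds an experimental knob controlling revert's inverted selection, but this excerpt contains only unchanged context lines, so the new option's name is not visible here. As a hedged illustration, the sketch below mirrors how recordfilter() already reads an experimental boolean via ui.configbool('experimental', 'crecord', ...); the option name used is a placeholder, not the one this patch introduces.

# Illustrative aside, not part of this changeset. 'someflag' is a placeholder
# name; the real option added by this patch does not appear in this excerpt.
# The lookup pattern mirrors the existing 'experimental.crecord' read below.
def useinvertedselection(ui):
    return ui.configbool('experimental', 'someflag', False)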
@@ -1,3352 +1,3363 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import crecord as crecordmod
17 import crecord as crecordmod
18 import lock as lockmod
18 import lock as lockmod
19
19
20 def ishunk(x):
20 def ishunk(x):
21 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
21 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
22 return isinstance(x, hunkclasses)
22 return isinstance(x, hunkclasses)
23
23
24 def newandmodified(chunks, originalchunks):
24 def newandmodified(chunks, originalchunks):
25 newlyaddedandmodifiedfiles = set()
25 newlyaddedandmodifiedfiles = set()
26 for chunk in chunks:
26 for chunk in chunks:
27 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
27 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
28 originalchunks:
28 originalchunks:
29 newlyaddedandmodifiedfiles.add(chunk.header.filename())
29 newlyaddedandmodifiedfiles.add(chunk.header.filename())
30 return newlyaddedandmodifiedfiles
30 return newlyaddedandmodifiedfiles
31
31
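newandmodified() above keeps only the files whose hunks are both marked as new files and absent from the original chunk list. A standalone sketch of that filtering, using duck-typed stand-ins for the hunk and header objects (the class names here are invented, and the ishunk() type check is left out):

# Standalone sketch of the newandmodified() filtering above; _Header/_Hunk
# are made-up stand-ins for patch.recordhunk and its header object.
class _Header(object):
    def __init__(self, fname, newfile):
        self._fname, self._newfile = fname, newfile
    def isnewfile(self):
        return self._newfile
    def filename(self):
        return self._fname

class _Hunk(object):
    def __init__(self, header):
        self.header = header

original = [_Hunk(_Header('a.txt', True))]
edited = original + [_Hunk(_Header('b.txt', True)), _Hunk(_Header('c.txt', False))]
# keep only new-file hunks that were not part of the original selection
newfiles = set(h.header.filename() for h in edited
               if h.header.isnewfile() and h not in original)
assert newfiles == set(['b.txt'])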
32 def parsealiases(cmd):
32 def parsealiases(cmd):
33 return cmd.lstrip("^").split("|")
33 return cmd.lstrip("^").split("|")
34
34
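parsealiases() turns a command-table key such as "^log|history" into its list of aliases; a quick standalone check of that behaviour:

# Standalone check of parsealiases(): strip the "^" priority marker, then
# split the remaining name on "|".
def parsealiases(cmd):
    return cmd.lstrip("^").split("|")

assert parsealiases("^log|history") == ["log", "history"]
assert parsealiases("grep") == ["grep"]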
35 def setupwrapcolorwrite(ui):
35 def setupwrapcolorwrite(ui):
36 # wrap ui.write so diff output can be labeled/colorized
36 # wrap ui.write so diff output can be labeled/colorized
37 def wrapwrite(orig, *args, **kw):
37 def wrapwrite(orig, *args, **kw):
38 label = kw.pop('label', '')
38 label = kw.pop('label', '')
39 for chunk, l in patch.difflabel(lambda: args):
39 for chunk, l in patch.difflabel(lambda: args):
40 orig(chunk, label=label + l)
40 orig(chunk, label=label + l)
41
41
42 oldwrite = ui.write
42 oldwrite = ui.write
43 def wrap(*args, **kwargs):
43 def wrap(*args, **kwargs):
44 return wrapwrite(oldwrite, *args, **kwargs)
44 return wrapwrite(oldwrite, *args, **kwargs)
45 setattr(ui, 'write', wrap)
45 setattr(ui, 'write', wrap)
46 return oldwrite
46 return oldwrite
47
47
48 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
48 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
49 if usecurses:
49 if usecurses:
50 if testfile:
50 if testfile:
51 recordfn = crecordmod.testdecorator(testfile,
51 recordfn = crecordmod.testdecorator(testfile,
52 crecordmod.testchunkselector)
52 crecordmod.testchunkselector)
53 else:
53 else:
54 recordfn = crecordmod.chunkselector
54 recordfn = crecordmod.chunkselector
55
55
56 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
56 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
57
57
58 else:
58 else:
59 return patch.filterpatch(ui, originalhunks, operation)
59 return patch.filterpatch(ui, originalhunks, operation)
60
60
61 def recordfilter(ui, originalhunks, operation=None):
61 def recordfilter(ui, originalhunks, operation=None):
62 """ Prompts the user to filter the originalhunks and returns a list of
62 """ Prompts the user to filter the originalhunks and returns a list of
63 selected hunks.
63 selected hunks.
64 *operation* is used for ui purposes to indicate to the user
64 *operation* is used for ui purposes to indicate to the user
65 what kind of filtering they are doing: reverting, committing, shelving, etc.
65 what kind of filtering they are doing: reverting, committing, shelving, etc.
66 *operation* has to be a translated string.
66 *operation* has to be a translated string.
67 """
67 """
68 usecurses = ui.configbool('experimental', 'crecord', False)
68 usecurses = ui.configbool('experimental', 'crecord', False)
69 testfile = ui.config('experimental', 'crecordtest', None)
69 testfile = ui.config('experimental', 'crecordtest', None)
70 oldwrite = setupwrapcolorwrite(ui)
70 oldwrite = setupwrapcolorwrite(ui)
71 try:
71 try:
72 newchunks = filterchunks(ui, originalhunks, usecurses, testfile,
72 newchunks = filterchunks(ui, originalhunks, usecurses, testfile,
73 operation)
73 operation)
74 finally:
74 finally:
75 ui.write = oldwrite
75 ui.write = oldwrite
76 return newchunks
76 return newchunks
77
77
78 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
78 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
79 filterfn, *pats, **opts):
79 filterfn, *pats, **opts):
80 import merge as mergemod
80 import merge as mergemod
81
81
82 if not ui.interactive():
82 if not ui.interactive():
83 raise util.Abort(_('running non-interactively, use %s instead') %
83 raise util.Abort(_('running non-interactively, use %s instead') %
84 cmdsuggest)
84 cmdsuggest)
85
85
86 # make sure username is set before going interactive
86 # make sure username is set before going interactive
87 if not opts.get('user'):
87 if not opts.get('user'):
88 ui.username() # raise exception, username not provided
88 ui.username() # raise exception, username not provided
89
89
90 def recordfunc(ui, repo, message, match, opts):
90 def recordfunc(ui, repo, message, match, opts):
91 """This is the generic record driver.
91 """This is the generic record driver.
92
92
93 Its job is to interactively filter local changes, and
93 Its job is to interactively filter local changes, and
94 accordingly prepare working directory into a state in which the
94 accordingly prepare working directory into a state in which the
95 job can be delegated to a non-interactive commit command such as
95 job can be delegated to a non-interactive commit command such as
96 'commit' or 'qrefresh'.
96 'commit' or 'qrefresh'.
97
97
98 After the actual job is done by non-interactive command, the
98 After the actual job is done by non-interactive command, the
99 working directory is restored to its original state.
99 working directory is restored to its original state.
100
100
101 In the end we'll record interesting changes, and everything else
101 In the end we'll record interesting changes, and everything else
102 will be left in place, so the user can continue working.
102 will be left in place, so the user can continue working.
103 """
103 """
104
104
105 checkunfinished(repo, commit=True)
105 checkunfinished(repo, commit=True)
106 merge = len(repo[None].parents()) > 1
106 merge = len(repo[None].parents()) > 1
107 if merge:
107 if merge:
108 raise util.Abort(_('cannot partially commit a merge '
108 raise util.Abort(_('cannot partially commit a merge '
109 '(use "hg commit" instead)'))
109 '(use "hg commit" instead)'))
110
110
111 status = repo.status(match=match)
111 status = repo.status(match=match)
112 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
112 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
113 diffopts.nodates = True
113 diffopts.nodates = True
114 diffopts.git = True
114 diffopts.git = True
115 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
115 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
116 originalchunks = patch.parsepatch(originaldiff)
116 originalchunks = patch.parsepatch(originaldiff)
117
117
118 # 1. filter patch, so we have an intending-to-apply subset of it
118 # 1. filter patch, so we have an intending-to-apply subset of it
119 try:
119 try:
120 chunks = filterfn(ui, originalchunks)
120 chunks = filterfn(ui, originalchunks)
121 except patch.PatchError, err:
121 except patch.PatchError, err:
122 raise util.Abort(_('error parsing patch: %s') % err)
122 raise util.Abort(_('error parsing patch: %s') % err)
123
123
124 # We need to keep a backup of files that have been newly added and
124 # We need to keep a backup of files that have been newly added and
125 # modified during the recording process because there is a previous
125 # modified during the recording process because there is a previous
126 # version without the edit in the workdir
126 # version without the edit in the workdir
127 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
127 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
128 contenders = set()
128 contenders = set()
129 for h in chunks:
129 for h in chunks:
130 try:
130 try:
131 contenders.update(set(h.files()))
131 contenders.update(set(h.files()))
132 except AttributeError:
132 except AttributeError:
133 pass
133 pass
134
134
135 changed = status.modified + status.added + status.removed
135 changed = status.modified + status.added + status.removed
136 newfiles = [f for f in changed if f in contenders]
136 newfiles = [f for f in changed if f in contenders]
137 if not newfiles:
137 if not newfiles:
138 ui.status(_('no changes to record\n'))
138 ui.status(_('no changes to record\n'))
139 return 0
139 return 0
140
140
141 modified = set(status.modified)
141 modified = set(status.modified)
142
142
143 # 2. backup changed files, so we can restore them in the end
143 # 2. backup changed files, so we can restore them in the end
144
144
145 if backupall:
145 if backupall:
146 tobackup = changed
146 tobackup = changed
147 else:
147 else:
148 tobackup = [f for f in newfiles if f in modified or f in \
148 tobackup = [f for f in newfiles if f in modified or f in \
149 newlyaddedandmodifiedfiles]
149 newlyaddedandmodifiedfiles]
150 backups = {}
150 backups = {}
151 if tobackup:
151 if tobackup:
152 backupdir = repo.join('record-backups')
152 backupdir = repo.join('record-backups')
153 try:
153 try:
154 os.mkdir(backupdir)
154 os.mkdir(backupdir)
155 except OSError, err:
155 except OSError, err:
156 if err.errno != errno.EEXIST:
156 if err.errno != errno.EEXIST:
157 raise
157 raise
158 try:
158 try:
159 # backup continues
159 # backup continues
160 for f in tobackup:
160 for f in tobackup:
161 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
161 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
162 dir=backupdir)
162 dir=backupdir)
163 os.close(fd)
163 os.close(fd)
164 ui.debug('backup %r as %r\n' % (f, tmpname))
164 ui.debug('backup %r as %r\n' % (f, tmpname))
165 util.copyfile(repo.wjoin(f), tmpname)
165 util.copyfile(repo.wjoin(f), tmpname)
166 shutil.copystat(repo.wjoin(f), tmpname)
166 shutil.copystat(repo.wjoin(f), tmpname)
167 backups[f] = tmpname
167 backups[f] = tmpname
168
168
169 fp = cStringIO.StringIO()
169 fp = cStringIO.StringIO()
170 for c in chunks:
170 for c in chunks:
171 fname = c.filename()
171 fname = c.filename()
172 if fname in backups:
172 if fname in backups:
173 c.write(fp)
173 c.write(fp)
174 dopatch = fp.tell()
174 dopatch = fp.tell()
175 fp.seek(0)
175 fp.seek(0)
176
176
177 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
177 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
178 # 3a. apply filtered patch to clean repo (clean)
178 # 3a. apply filtered patch to clean repo (clean)
179 if backups:
179 if backups:
180 # Equivalent to hg.revert
180 # Equivalent to hg.revert
181 choices = lambda key: key in backups
181 choices = lambda key: key in backups
182 mergemod.update(repo, repo.dirstate.p1(),
182 mergemod.update(repo, repo.dirstate.p1(),
183 False, True, choices)
183 False, True, choices)
184
184
185 # 3b. (apply)
185 # 3b. (apply)
186 if dopatch:
186 if dopatch:
187 try:
187 try:
188 ui.debug('applying patch\n')
188 ui.debug('applying patch\n')
189 ui.debug(fp.getvalue())
189 ui.debug(fp.getvalue())
190 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
190 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
191 except patch.PatchError, err:
191 except patch.PatchError, err:
192 raise util.Abort(str(err))
192 raise util.Abort(str(err))
193 del fp
193 del fp
194
194
195 # 4. We prepared working directory according to filtered
195 # 4. We prepared working directory according to filtered
196 # patch. Now is the time to delegate the job to
196 # patch. Now is the time to delegate the job to
197 # commit/qrefresh or the like!
197 # commit/qrefresh or the like!
198
198
199 # Make all of the pathnames absolute.
199 # Make all of the pathnames absolute.
200 newfiles = [repo.wjoin(nf) for nf in newfiles]
200 newfiles = [repo.wjoin(nf) for nf in newfiles]
201 return commitfunc(ui, repo, *newfiles, **opts)
201 return commitfunc(ui, repo, *newfiles, **opts)
202 finally:
202 finally:
203 # 5. finally restore backed-up files
203 # 5. finally restore backed-up files
204 try:
204 try:
205 for realname, tmpname in backups.iteritems():
205 for realname, tmpname in backups.iteritems():
206 ui.debug('restoring %r to %r\n' % (tmpname, realname))
206 ui.debug('restoring %r to %r\n' % (tmpname, realname))
207 util.copyfile(tmpname, repo.wjoin(realname))
207 util.copyfile(tmpname, repo.wjoin(realname))
208 # Our calls to copystat() here and above are a
208 # Our calls to copystat() here and above are a
209 # hack to trick any editors that have f open into
209 # hack to trick any editors that have f open into
210 # thinking that we haven't modified them.
210 # thinking that we haven't modified them.
211 #
211 #
212 # Also note that this is racy, as an editor could
212 # Also note that this is racy, as an editor could
213 # notice the file's mtime before we've finished
213 # notice the file's mtime before we've finished
214 # writing it.
214 # writing it.
215 shutil.copystat(tmpname, repo.wjoin(realname))
215 shutil.copystat(tmpname, repo.wjoin(realname))
216 os.unlink(tmpname)
216 os.unlink(tmpname)
217 if tobackup:
217 if tobackup:
218 os.rmdir(backupdir)
218 os.rmdir(backupdir)
219 except OSError:
219 except OSError:
220 pass
220 pass
221
221
222 return commit(ui, repo, recordfunc, pats, opts)
222 return commit(ui, repo, recordfunc, pats, opts)
223
223
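recordfunc() above works in the five numbered steps of its comments: filter the patch, back up the touched files, revert and re-apply only the selected hunks, delegate to the real commit command, then restore the backups. A much-reduced sketch of that backup/apply/restore cycle on a plain file (the paths and the "apply" step are illustrative assumptions, not Mercurial API):

# Reduced sketch of the backup/apply/restore cycle in recordfunc() above,
# acting on an ordinary file instead of a working directory.
import os, shutil, tempfile

def record_sketch(workfile, selectedtext):
    backupdir = tempfile.mkdtemp(prefix='record-backups-')
    backup = os.path.join(backupdir, os.path.basename(workfile) + '.bak')
    shutil.copyfile(workfile, backup)        # 2. back up the changed file
    try:
        with open(workfile, 'w') as f:       # 3. rewrite it with only the
            f.write(selectedtext)            #    selected changes applied
        # 4. this is where the non-interactive commit would be delegated to
    finally:
        shutil.copyfile(backup, workfile)    # 5. restore the backed-up file
        os.unlink(backup)
        os.rmdir(backupdir)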
224 def findpossible(cmd, table, strict=False):
224 def findpossible(cmd, table, strict=False):
225 """
225 """
226 Return cmd -> (aliases, command table entry)
226 Return cmd -> (aliases, command table entry)
227 for each matching command.
227 for each matching command.
228 Return debug commands (or their aliases) only if no normal command matches.
228 Return debug commands (or their aliases) only if no normal command matches.
229 """
229 """
230 choice = {}
230 choice = {}
231 debugchoice = {}
231 debugchoice = {}
232
232
233 if cmd in table:
233 if cmd in table:
234 # short-circuit exact matches, "log" alias beats "^log|history"
234 # short-circuit exact matches, "log" alias beats "^log|history"
235 keys = [cmd]
235 keys = [cmd]
236 else:
236 else:
237 keys = table.keys()
237 keys = table.keys()
238
238
239 allcmds = []
239 allcmds = []
240 for e in keys:
240 for e in keys:
241 aliases = parsealiases(e)
241 aliases = parsealiases(e)
242 allcmds.extend(aliases)
242 allcmds.extend(aliases)
243 found = None
243 found = None
244 if cmd in aliases:
244 if cmd in aliases:
245 found = cmd
245 found = cmd
246 elif not strict:
246 elif not strict:
247 for a in aliases:
247 for a in aliases:
248 if a.startswith(cmd):
248 if a.startswith(cmd):
249 found = a
249 found = a
250 break
250 break
251 if found is not None:
251 if found is not None:
252 if aliases[0].startswith("debug") or found.startswith("debug"):
252 if aliases[0].startswith("debug") or found.startswith("debug"):
253 debugchoice[found] = (aliases, table[e])
253 debugchoice[found] = (aliases, table[e])
254 else:
254 else:
255 choice[found] = (aliases, table[e])
255 choice[found] = (aliases, table[e])
256
256
257 if not choice and debugchoice:
257 if not choice and debugchoice:
258 choice = debugchoice
258 choice = debugchoice
259
259
260 return choice, allcmds
260 return choice, allcmds
261
261
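findpossible() matches the requested name against every alias, accepting unambiguous prefixes and demoting debug commands; a reduced standalone model of the prefix matching (the three-entry table is made up, and the debug-command demotion is left out):

# Reduced standalone model of findpossible()'s alias/prefix matching.
table = {"^log|history": None, "locate": None, "debugdata": None}

def possible(cmd):
    choice = {}
    for key in table:
        aliases = key.lstrip("^").split("|")
        found = cmd if cmd in aliases else None
        if found is None:
            # accept the first alias that the request is a prefix of
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is not None:
            choice[found] = aliases
    return choice

assert sorted(possible("lo")) == ["locate", "log"]   # ambiguous prefix
assert sorted(possible("log")) == ["log"]            # exact alias match
assert "debugdata" in possible("debugd")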
262 def findcmd(cmd, table, strict=True):
262 def findcmd(cmd, table, strict=True):
263 """Return (aliases, command table entry) for command string."""
263 """Return (aliases, command table entry) for command string."""
264 choice, allcmds = findpossible(cmd, table, strict)
264 choice, allcmds = findpossible(cmd, table, strict)
265
265
266 if cmd in choice:
266 if cmd in choice:
267 return choice[cmd]
267 return choice[cmd]
268
268
269 if len(choice) > 1:
269 if len(choice) > 1:
270 clist = choice.keys()
270 clist = choice.keys()
271 clist.sort()
271 clist.sort()
272 raise error.AmbiguousCommand(cmd, clist)
272 raise error.AmbiguousCommand(cmd, clist)
273
273
274 if choice:
274 if choice:
275 return choice.values()[0]
275 return choice.values()[0]
276
276
277 raise error.UnknownCommand(cmd, allcmds)
277 raise error.UnknownCommand(cmd, allcmds)
278
278
279 def findrepo(p):
279 def findrepo(p):
280 while not os.path.isdir(os.path.join(p, ".hg")):
280 while not os.path.isdir(os.path.join(p, ".hg")):
281 oldp, p = p, os.path.dirname(p)
281 oldp, p = p, os.path.dirname(p)
282 if p == oldp:
282 if p == oldp:
283 return None
283 return None
284
284
285 return p
285 return p
286
286
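findrepo() walks up from a path until it finds a directory containing '.hg'. A self-contained usage sketch (the loop is repeated so the snippet runs on its own):

# Usage sketch for findrepo(): report the enclosing repository root for the
# current directory, or note that there is none.
import os

def findrepo(p):
    # same upward walk as above: stop at the first directory holding '.hg',
    # give up once dirname() no longer changes the path (filesystem root)
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None
    return p

print(findrepo(os.getcwd()) or "not inside a Mercurial repository")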
287 def bailifchanged(repo, merge=True):
287 def bailifchanged(repo, merge=True):
288 if merge and repo.dirstate.p2() != nullid:
288 if merge and repo.dirstate.p2() != nullid:
289 raise util.Abort(_('outstanding uncommitted merge'))
289 raise util.Abort(_('outstanding uncommitted merge'))
290 modified, added, removed, deleted = repo.status()[:4]
290 modified, added, removed, deleted = repo.status()[:4]
291 if modified or added or removed or deleted:
291 if modified or added or removed or deleted:
292 raise util.Abort(_('uncommitted changes'))
292 raise util.Abort(_('uncommitted changes'))
293 ctx = repo[None]
293 ctx = repo[None]
294 for s in sorted(ctx.substate):
294 for s in sorted(ctx.substate):
295 ctx.sub(s).bailifchanged()
295 ctx.sub(s).bailifchanged()
296
296
297 def logmessage(ui, opts):
297 def logmessage(ui, opts):
298 """ get the log message according to -m and -l option """
298 """ get the log message according to -m and -l option """
299 message = opts.get('message')
299 message = opts.get('message')
300 logfile = opts.get('logfile')
300 logfile = opts.get('logfile')
301
301
302 if message and logfile:
302 if message and logfile:
303 raise util.Abort(_('options --message and --logfile are mutually '
303 raise util.Abort(_('options --message and --logfile are mutually '
304 'exclusive'))
304 'exclusive'))
305 if not message and logfile:
305 if not message and logfile:
306 try:
306 try:
307 if logfile == '-':
307 if logfile == '-':
308 message = ui.fin.read()
308 message = ui.fin.read()
309 else:
309 else:
310 message = '\n'.join(util.readfile(logfile).splitlines())
310 message = '\n'.join(util.readfile(logfile).splitlines())
311 except IOError, inst:
311 except IOError, inst:
312 raise util.Abort(_("can't read commit message '%s': %s") %
312 raise util.Abort(_("can't read commit message '%s': %s") %
313 (logfile, inst.strerror))
313 (logfile, inst.strerror))
314 return message
314 return message
315
315
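logmessage() resolves -m/--message and -l/--logfile and refuses to accept both; a standalone restatement (util.Abort becomes ValueError, the logfile == '-' stdin case is omitted, and the reader callable is an assumption of the sketch):

# Standalone restatement of the -m/-l handling in logmessage() above.
def logmessage_sketch(opts, readfile):
    message, logfile = opts.get('message'), opts.get('logfile')
    if message and logfile:
        raise ValueError('options --message and --logfile are mutually exclusive')
    if not message and logfile:
        # normalize line endings, as the '\n'.join(...splitlines()) above does
        message = '\n'.join(readfile(logfile).splitlines())
    return message

assert logmessage_sketch({'message': 'fix bug'}, readfile=None) == 'fix bug'
assert logmessage_sketch({'logfile': 'msg.txt'},
                         readfile=lambda fn: 'line1\r\nline2\n') == 'line1\nline2'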
316 def mergeeditform(ctxorbool, baseformname):
316 def mergeeditform(ctxorbool, baseformname):
317 """return appropriate editform name (referencing a committemplate)
317 """return appropriate editform name (referencing a committemplate)
318
318
319 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
319 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
320 a merge is being committed.
320 a merge is being committed.
321
321
322 This returns baseformname with '.merge' appended if it is a merge,
322 This returns baseformname with '.merge' appended if it is a merge,
323 otherwise '.normal' is appended.
323 otherwise '.normal' is appended.
324 """
324 """
325 if isinstance(ctxorbool, bool):
325 if isinstance(ctxorbool, bool):
326 if ctxorbool:
326 if ctxorbool:
327 return baseformname + ".merge"
327 return baseformname + ".merge"
328 elif 1 < len(ctxorbool.parents()):
328 elif 1 < len(ctxorbool.parents()):
329 return baseformname + ".merge"
329 return baseformname + ".merge"
330
330
331 return baseformname + ".normal"
331 return baseformname + ".normal"
332
332
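mergeeditform() only decides whether the committemplate editform gets a '.merge' or '.normal' suffix; restated standalone for the bool case (a ctx argument would be tested with len(ctx.parents()) > 1 instead), with an illustrative base name:

# Standalone restatement of mergeeditform() for the bool case only.
def mergeeditform(ismerge, baseformname):
    return baseformname + (".merge" if ismerge else ".normal")

assert mergeeditform(True, "commit") == "commit.merge"
assert mergeeditform(False, "commit") == "commit.normal"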
333 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
333 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
334 editform='', **opts):
334 editform='', **opts):
335 """get appropriate commit message editor according to '--edit' option
335 """get appropriate commit message editor according to '--edit' option
336
336
337 'finishdesc' is a function to be called with the edited commit message
337 'finishdesc' is a function to be called with the edited commit message
338 (= 'description' of the new changeset) just after editing, but
338 (= 'description' of the new changeset) just after editing, but
339 before checking emptiness. It should return the actual text to be
339 before checking emptiness. It should return the actual text to be
340 stored into history. This allows the description to be changed before
340 stored into history. This allows the description to be changed before
341 it is stored.
341 it is stored.
342
342
343 'extramsg' is an extra message to be shown in the editor instead of
343 'extramsg' is an extra message to be shown in the editor instead of
344 the 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
344 the 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
345 are automatically added.
345 are automatically added.
346
346
347 'editform' is a dot-separated list of names, to distinguish
347 'editform' is a dot-separated list of names, to distinguish
348 the purpose of commit text editing.
348 the purpose of commit text editing.
349
349
350 'getcommiteditor' returns 'commitforceeditor' regardless of
350 'getcommiteditor' returns 'commitforceeditor' regardless of
351 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
351 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
352 they are specific to usage in MQ.
352 they are specific to usage in MQ.
353 """
353 """
354 if edit or finishdesc or extramsg:
354 if edit or finishdesc or extramsg:
355 return lambda r, c, s: commitforceeditor(r, c, s,
355 return lambda r, c, s: commitforceeditor(r, c, s,
356 finishdesc=finishdesc,
356 finishdesc=finishdesc,
357 extramsg=extramsg,
357 extramsg=extramsg,
358 editform=editform)
358 editform=editform)
359 elif editform:
359 elif editform:
360 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
360 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
361 else:
361 else:
362 return commiteditor
362 return commiteditor
363
363
364 def loglimit(opts):
364 def loglimit(opts):
365 """get the log limit according to option -l/--limit"""
365 """get the log limit according to option -l/--limit"""
366 limit = opts.get('limit')
366 limit = opts.get('limit')
367 if limit:
367 if limit:
368 try:
368 try:
369 limit = int(limit)
369 limit = int(limit)
370 except ValueError:
370 except ValueError:
371 raise util.Abort(_('limit must be a positive integer'))
371 raise util.Abort(_('limit must be a positive integer'))
372 if limit <= 0:
372 if limit <= 0:
373 raise util.Abort(_('limit must be positive'))
373 raise util.Abort(_('limit must be positive'))
374 else:
374 else:
375 limit = None
375 limit = None
376 return limit
376 return limit
377
377
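loglimit() validates -l/--limit and turns a missing value into None; restated standalone with ValueError in place of util.Abort:

# Standalone restatement of loglimit()'s validation of -l/--limit.
def loglimit(opts):
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise ValueError('limit must be a positive integer')
    if limit <= 0:
        raise ValueError('limit must be positive')
    return limit

assert loglimit({}) is None
assert loglimit({'limit': '10'}) == 10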
378 def makefilename(repo, pat, node, desc=None,
378 def makefilename(repo, pat, node, desc=None,
379 total=None, seqno=None, revwidth=None, pathname=None):
379 total=None, seqno=None, revwidth=None, pathname=None):
380 node_expander = {
380 node_expander = {
381 'H': lambda: hex(node),
381 'H': lambda: hex(node),
382 'R': lambda: str(repo.changelog.rev(node)),
382 'R': lambda: str(repo.changelog.rev(node)),
383 'h': lambda: short(node),
383 'h': lambda: short(node),
384 'm': lambda: re.sub('[^\w]', '_', str(desc))
384 'm': lambda: re.sub('[^\w]', '_', str(desc))
385 }
385 }
386 expander = {
386 expander = {
387 '%': lambda: '%',
387 '%': lambda: '%',
388 'b': lambda: os.path.basename(repo.root),
388 'b': lambda: os.path.basename(repo.root),
389 }
389 }
390
390
391 try:
391 try:
392 if node:
392 if node:
393 expander.update(node_expander)
393 expander.update(node_expander)
394 if node:
394 if node:
395 expander['r'] = (lambda:
395 expander['r'] = (lambda:
396 str(repo.changelog.rev(node)).zfill(revwidth or 0))
396 str(repo.changelog.rev(node)).zfill(revwidth or 0))
397 if total is not None:
397 if total is not None:
398 expander['N'] = lambda: str(total)
398 expander['N'] = lambda: str(total)
399 if seqno is not None:
399 if seqno is not None:
400 expander['n'] = lambda: str(seqno)
400 expander['n'] = lambda: str(seqno)
401 if total is not None and seqno is not None:
401 if total is not None and seqno is not None:
402 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
402 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
403 if pathname is not None:
403 if pathname is not None:
404 expander['s'] = lambda: os.path.basename(pathname)
404 expander['s'] = lambda: os.path.basename(pathname)
405 expander['d'] = lambda: os.path.dirname(pathname) or '.'
405 expander['d'] = lambda: os.path.dirname(pathname) or '.'
406 expander['p'] = lambda: pathname
406 expander['p'] = lambda: pathname
407
407
408 newname = []
408 newname = []
409 patlen = len(pat)
409 patlen = len(pat)
410 i = 0
410 i = 0
411 while i < patlen:
411 while i < patlen:
412 c = pat[i]
412 c = pat[i]
413 if c == '%':
413 if c == '%':
414 i += 1
414 i += 1
415 c = pat[i]
415 c = pat[i]
416 c = expander[c]()
416 c = expander[c]()
417 newname.append(c)
417 newname.append(c)
418 i += 1
418 i += 1
419 return ''.join(newname)
419 return ''.join(newname)
420 except KeyError, inst:
420 except KeyError, inst:
421 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
421 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
422 inst.args[0])
422 inst.args[0])
423
423
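makefilename() expands a printf-like pattern ('%H', '%h', '%R', '%r', '%m', '%b', '%N', '%n', '%s', '%d', '%p', '%%') via the expander table above; a reduced standalone model of the same '%' scanning loop, with made-up sample values for two keys:

# Reduced standalone model of makefilename()'s '%' expansion; 'sample'
# holds invented values, only a few keys are covered.
def expand(pat, expander):
    out, i = [], 0
    while i < len(pat):
        c = pat[i]
        if c == '%':
            i += 1
            c = expander[pat[i]]()
        out.append(c)
        i += 1
    return ''.join(out)

sample = {
    'h': lambda: '69609f43',          # short node (sample value)
    'n': lambda: str(7).zfill(2),     # zero-padded sequence number
    '%': lambda: '%',
}
assert expand('hg-%h-%n.patch', sample) == 'hg-69609f43-07.patch'
assert expand('100%%', sample) == '100%'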
424 def makefileobj(repo, pat, node=None, desc=None, total=None,
424 def makefileobj(repo, pat, node=None, desc=None, total=None,
425 seqno=None, revwidth=None, mode='wb', modemap=None,
425 seqno=None, revwidth=None, mode='wb', modemap=None,
426 pathname=None):
426 pathname=None):
427
427
428 writable = mode not in ('r', 'rb')
428 writable = mode not in ('r', 'rb')
429
429
430 if not pat or pat == '-':
430 if not pat or pat == '-':
431 if writable:
431 if writable:
432 fp = repo.ui.fout
432 fp = repo.ui.fout
433 else:
433 else:
434 fp = repo.ui.fin
434 fp = repo.ui.fin
435 if util.safehasattr(fp, 'fileno'):
435 if util.safehasattr(fp, 'fileno'):
436 return os.fdopen(os.dup(fp.fileno()), mode)
436 return os.fdopen(os.dup(fp.fileno()), mode)
437 else:
437 else:
438 # if this fp can't be duped properly, return
438 # if this fp can't be duped properly, return
439 # a dummy object that can be closed
439 # a dummy object that can be closed
440 class wrappedfileobj(object):
440 class wrappedfileobj(object):
441 noop = lambda x: None
441 noop = lambda x: None
442 def __init__(self, f):
442 def __init__(self, f):
443 self.f = f
443 self.f = f
444 def __getattr__(self, attr):
444 def __getattr__(self, attr):
445 if attr == 'close':
445 if attr == 'close':
446 return self.noop
446 return self.noop
447 else:
447 else:
448 return getattr(self.f, attr)
448 return getattr(self.f, attr)
449
449
450 return wrappedfileobj(fp)
450 return wrappedfileobj(fp)
451 if util.safehasattr(pat, 'write') and writable:
451 if util.safehasattr(pat, 'write') and writable:
452 return pat
452 return pat
453 if util.safehasattr(pat, 'read') and 'r' in mode:
453 if util.safehasattr(pat, 'read') and 'r' in mode:
454 return pat
454 return pat
455 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
455 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
456 if modemap is not None:
456 if modemap is not None:
457 mode = modemap.get(fn, mode)
457 mode = modemap.get(fn, mode)
458 if mode == 'wb':
458 if mode == 'wb':
459 modemap[fn] = 'ab'
459 modemap[fn] = 'ab'
460 return open(fn, mode)
460 return open(fn, mode)
461
461
462 def openrevlog(repo, cmd, file_, opts):
462 def openrevlog(repo, cmd, file_, opts):
463 """opens the changelog, manifest, a filelog or a given revlog"""
463 """opens the changelog, manifest, a filelog or a given revlog"""
464 cl = opts['changelog']
464 cl = opts['changelog']
465 mf = opts['manifest']
465 mf = opts['manifest']
466 dir = opts['dir']
466 dir = opts['dir']
467 msg = None
467 msg = None
468 if cl and mf:
468 if cl and mf:
469 msg = _('cannot specify --changelog and --manifest at the same time')
469 msg = _('cannot specify --changelog and --manifest at the same time')
470 elif cl and dir:
470 elif cl and dir:
471 msg = _('cannot specify --changelog and --dir at the same time')
471 msg = _('cannot specify --changelog and --dir at the same time')
472 elif cl or mf:
472 elif cl or mf:
473 if file_:
473 if file_:
474 msg = _('cannot specify filename with --changelog or --manifest')
474 msg = _('cannot specify filename with --changelog or --manifest')
475 elif not repo:
475 elif not repo:
476 msg = _('cannot specify --changelog or --manifest or --dir '
476 msg = _('cannot specify --changelog or --manifest or --dir '
477 'without a repository')
477 'without a repository')
478 if msg:
478 if msg:
479 raise util.Abort(msg)
479 raise util.Abort(msg)
480
480
481 r = None
481 r = None
482 if repo:
482 if repo:
483 if cl:
483 if cl:
484 r = repo.unfiltered().changelog
484 r = repo.unfiltered().changelog
485 elif dir:
485 elif dir:
486 if 'treemanifest' not in repo.requirements:
486 if 'treemanifest' not in repo.requirements:
487 raise util.Abort(_("--dir can only be used on repos with "
487 raise util.Abort(_("--dir can only be used on repos with "
488 "treemanifest enabled"))
488 "treemanifest enabled"))
489 dirlog = repo.dirlog(file_)
489 dirlog = repo.dirlog(file_)
490 if len(dirlog):
490 if len(dirlog):
491 r = dirlog
491 r = dirlog
492 elif mf:
492 elif mf:
493 r = repo.manifest
493 r = repo.manifest
494 elif file_:
494 elif file_:
495 filelog = repo.file(file_)
495 filelog = repo.file(file_)
496 if len(filelog):
496 if len(filelog):
497 r = filelog
497 r = filelog
498 if not r:
498 if not r:
499 if not file_:
499 if not file_:
500 raise error.CommandError(cmd, _('invalid arguments'))
500 raise error.CommandError(cmd, _('invalid arguments'))
501 if not os.path.isfile(file_):
501 if not os.path.isfile(file_):
502 raise util.Abort(_("revlog '%s' not found") % file_)
502 raise util.Abort(_("revlog '%s' not found") % file_)
503 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
503 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
504 file_[:-2] + ".i")
504 file_[:-2] + ".i")
505 return r
505 return r
506
506
507 def copy(ui, repo, pats, opts, rename=False):
507 def copy(ui, repo, pats, opts, rename=False):
508 # called with the repo lock held
508 # called with the repo lock held
509 #
509 #
510 # hgsep => pathname that uses "/" to separate directories
510 # hgsep => pathname that uses "/" to separate directories
511 # ossep => pathname that uses os.sep to separate directories
511 # ossep => pathname that uses os.sep to separate directories
512 cwd = repo.getcwd()
512 cwd = repo.getcwd()
513 targets = {}
513 targets = {}
514 after = opts.get("after")
514 after = opts.get("after")
515 dryrun = opts.get("dry_run")
515 dryrun = opts.get("dry_run")
516 wctx = repo[None]
516 wctx = repo[None]
517
517
518 def walkpat(pat):
518 def walkpat(pat):
519 srcs = []
519 srcs = []
520 if after:
520 if after:
521 badstates = '?'
521 badstates = '?'
522 else:
522 else:
523 badstates = '?r'
523 badstates = '?r'
524 m = scmutil.match(repo[None], [pat], opts, globbed=True)
524 m = scmutil.match(repo[None], [pat], opts, globbed=True)
525 for abs in repo.walk(m):
525 for abs in repo.walk(m):
526 state = repo.dirstate[abs]
526 state = repo.dirstate[abs]
527 rel = m.rel(abs)
527 rel = m.rel(abs)
528 exact = m.exact(abs)
528 exact = m.exact(abs)
529 if state in badstates:
529 if state in badstates:
530 if exact and state == '?':
530 if exact and state == '?':
531 ui.warn(_('%s: not copying - file is not managed\n') % rel)
531 ui.warn(_('%s: not copying - file is not managed\n') % rel)
532 if exact and state == 'r':
532 if exact and state == 'r':
533 ui.warn(_('%s: not copying - file has been marked for'
533 ui.warn(_('%s: not copying - file has been marked for'
534 ' remove\n') % rel)
534 ' remove\n') % rel)
535 continue
535 continue
536 # abs: hgsep
536 # abs: hgsep
537 # rel: ossep
537 # rel: ossep
538 srcs.append((abs, rel, exact))
538 srcs.append((abs, rel, exact))
539 return srcs
539 return srcs
540
540
541 # abssrc: hgsep
541 # abssrc: hgsep
542 # relsrc: ossep
542 # relsrc: ossep
543 # otarget: ossep
543 # otarget: ossep
544 def copyfile(abssrc, relsrc, otarget, exact):
544 def copyfile(abssrc, relsrc, otarget, exact):
545 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
545 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
546 if '/' in abstarget:
546 if '/' in abstarget:
547 # We cannot normalize abstarget itself, as this would prevent
547 # We cannot normalize abstarget itself, as this would prevent
548 # case-only renames, like a => A.
548 # case-only renames, like a => A.
549 abspath, absname = abstarget.rsplit('/', 1)
549 abspath, absname = abstarget.rsplit('/', 1)
550 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
550 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
551 reltarget = repo.pathto(abstarget, cwd)
551 reltarget = repo.pathto(abstarget, cwd)
552 target = repo.wjoin(abstarget)
552 target = repo.wjoin(abstarget)
553 src = repo.wjoin(abssrc)
553 src = repo.wjoin(abssrc)
554 state = repo.dirstate[abstarget]
554 state = repo.dirstate[abstarget]
555
555
556 scmutil.checkportable(ui, abstarget)
556 scmutil.checkportable(ui, abstarget)
557
557
558 # check for collisions
558 # check for collisions
559 prevsrc = targets.get(abstarget)
559 prevsrc = targets.get(abstarget)
560 if prevsrc is not None:
560 if prevsrc is not None:
561 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
561 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
562 (reltarget, repo.pathto(abssrc, cwd),
562 (reltarget, repo.pathto(abssrc, cwd),
563 repo.pathto(prevsrc, cwd)))
563 repo.pathto(prevsrc, cwd)))
564 return
564 return
565
565
566 # check for overwrites
566 # check for overwrites
567 exists = os.path.lexists(target)
567 exists = os.path.lexists(target)
568 samefile = False
568 samefile = False
569 if exists and abssrc != abstarget:
569 if exists and abssrc != abstarget:
570 if (repo.dirstate.normalize(abssrc) ==
570 if (repo.dirstate.normalize(abssrc) ==
571 repo.dirstate.normalize(abstarget)):
571 repo.dirstate.normalize(abstarget)):
572 if not rename:
572 if not rename:
573 ui.warn(_("%s: can't copy - same file\n") % reltarget)
573 ui.warn(_("%s: can't copy - same file\n") % reltarget)
574 return
574 return
575 exists = False
575 exists = False
576 samefile = True
576 samefile = True
577
577
578 if not after and exists or after and state in 'mn':
578 if not after and exists or after and state in 'mn':
579 if not opts['force']:
579 if not opts['force']:
580 ui.warn(_('%s: not overwriting - file exists\n') %
580 ui.warn(_('%s: not overwriting - file exists\n') %
581 reltarget)
581 reltarget)
582 return
582 return
583
583
584 if after:
584 if after:
585 if not exists:
585 if not exists:
586 if rename:
586 if rename:
587 ui.warn(_('%s: not recording move - %s does not exist\n') %
587 ui.warn(_('%s: not recording move - %s does not exist\n') %
588 (relsrc, reltarget))
588 (relsrc, reltarget))
589 else:
589 else:
590 ui.warn(_('%s: not recording copy - %s does not exist\n') %
590 ui.warn(_('%s: not recording copy - %s does not exist\n') %
591 (relsrc, reltarget))
591 (relsrc, reltarget))
592 return
592 return
593 elif not dryrun:
593 elif not dryrun:
594 try:
594 try:
595 if exists:
595 if exists:
596 os.unlink(target)
596 os.unlink(target)
597 targetdir = os.path.dirname(target) or '.'
597 targetdir = os.path.dirname(target) or '.'
598 if not os.path.isdir(targetdir):
598 if not os.path.isdir(targetdir):
599 os.makedirs(targetdir)
599 os.makedirs(targetdir)
600 if samefile:
600 if samefile:
601 tmp = target + "~hgrename"
601 tmp = target + "~hgrename"
602 os.rename(src, tmp)
602 os.rename(src, tmp)
603 os.rename(tmp, target)
603 os.rename(tmp, target)
604 else:
604 else:
605 util.copyfile(src, target)
605 util.copyfile(src, target)
606 srcexists = True
606 srcexists = True
607 except IOError, inst:
607 except IOError, inst:
608 if inst.errno == errno.ENOENT:
608 if inst.errno == errno.ENOENT:
609 ui.warn(_('%s: deleted in working directory\n') % relsrc)
609 ui.warn(_('%s: deleted in working directory\n') % relsrc)
610 srcexists = False
610 srcexists = False
611 else:
611 else:
612 ui.warn(_('%s: cannot copy - %s\n') %
612 ui.warn(_('%s: cannot copy - %s\n') %
613 (relsrc, inst.strerror))
613 (relsrc, inst.strerror))
614 return True # report a failure
614 return True # report a failure
615
615
616 if ui.verbose or not exact:
616 if ui.verbose or not exact:
617 if rename:
617 if rename:
618 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
618 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
619 else:
619 else:
620 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
620 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
621
621
622 targets[abstarget] = abssrc
622 targets[abstarget] = abssrc
623
623
624 # fix up dirstate
624 # fix up dirstate
625 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
625 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
626 dryrun=dryrun, cwd=cwd)
626 dryrun=dryrun, cwd=cwd)
627 if rename and not dryrun:
627 if rename and not dryrun:
628 if not after and srcexists and not samefile:
628 if not after and srcexists and not samefile:
629 util.unlinkpath(repo.wjoin(abssrc))
629 util.unlinkpath(repo.wjoin(abssrc))
630 wctx.forget([abssrc])
630 wctx.forget([abssrc])
631
631
632 # pat: ossep
632 # pat: ossep
633 # dest ossep
633 # dest ossep
634 # srcs: list of (hgsep, hgsep, ossep, bool)
634 # srcs: list of (hgsep, hgsep, ossep, bool)
635 # return: function that takes hgsep and returns ossep
635 # return: function that takes hgsep and returns ossep
636 def targetpathfn(pat, dest, srcs):
636 def targetpathfn(pat, dest, srcs):
637 if os.path.isdir(pat):
637 if os.path.isdir(pat):
638 abspfx = pathutil.canonpath(repo.root, cwd, pat)
638 abspfx = pathutil.canonpath(repo.root, cwd, pat)
639 abspfx = util.localpath(abspfx)
639 abspfx = util.localpath(abspfx)
640 if destdirexists:
640 if destdirexists:
641 striplen = len(os.path.split(abspfx)[0])
641 striplen = len(os.path.split(abspfx)[0])
642 else:
642 else:
643 striplen = len(abspfx)
643 striplen = len(abspfx)
644 if striplen:
644 if striplen:
645 striplen += len(os.sep)
645 striplen += len(os.sep)
646 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
646 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
647 elif destdirexists:
647 elif destdirexists:
648 res = lambda p: os.path.join(dest,
648 res = lambda p: os.path.join(dest,
649 os.path.basename(util.localpath(p)))
649 os.path.basename(util.localpath(p)))
650 else:
650 else:
651 res = lambda p: dest
651 res = lambda p: dest
652 return res
652 return res
653
653
654 # pat: ossep
654 # pat: ossep
655 # dest ossep
655 # dest ossep
656 # srcs: list of (hgsep, hgsep, ossep, bool)
656 # srcs: list of (hgsep, hgsep, ossep, bool)
657 # return: function that takes hgsep and returns ossep
657 # return: function that takes hgsep and returns ossep
658 def targetpathafterfn(pat, dest, srcs):
658 def targetpathafterfn(pat, dest, srcs):
659 if matchmod.patkind(pat):
659 if matchmod.patkind(pat):
660 # a mercurial pattern
660 # a mercurial pattern
661 res = lambda p: os.path.join(dest,
661 res = lambda p: os.path.join(dest,
662 os.path.basename(util.localpath(p)))
662 os.path.basename(util.localpath(p)))
663 else:
663 else:
664 abspfx = pathutil.canonpath(repo.root, cwd, pat)
664 abspfx = pathutil.canonpath(repo.root, cwd, pat)
665 if len(abspfx) < len(srcs[0][0]):
665 if len(abspfx) < len(srcs[0][0]):
666 # A directory. Either the target path contains the last
666 # A directory. Either the target path contains the last
667 # component of the source path or it does not.
667 # component of the source path or it does not.
668 def evalpath(striplen):
668 def evalpath(striplen):
669 score = 0
669 score = 0
670 for s in srcs:
670 for s in srcs:
671 t = os.path.join(dest, util.localpath(s[0])[striplen:])
671 t = os.path.join(dest, util.localpath(s[0])[striplen:])
672 if os.path.lexists(t):
672 if os.path.lexists(t):
673 score += 1
673 score += 1
674 return score
674 return score
675
675
676 abspfx = util.localpath(abspfx)
676 abspfx = util.localpath(abspfx)
677 striplen = len(abspfx)
677 striplen = len(abspfx)
678 if striplen:
678 if striplen:
679 striplen += len(os.sep)
679 striplen += len(os.sep)
680 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
680 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
681 score = evalpath(striplen)
681 score = evalpath(striplen)
682 striplen1 = len(os.path.split(abspfx)[0])
682 striplen1 = len(os.path.split(abspfx)[0])
683 if striplen1:
683 if striplen1:
684 striplen1 += len(os.sep)
684 striplen1 += len(os.sep)
685 if evalpath(striplen1) > score:
685 if evalpath(striplen1) > score:
686 striplen = striplen1
686 striplen = striplen1
687 res = lambda p: os.path.join(dest,
687 res = lambda p: os.path.join(dest,
688 util.localpath(p)[striplen:])
688 util.localpath(p)[striplen:])
689 else:
689 else:
690 # a file
690 # a file
691 if destdirexists:
691 if destdirexists:
692 res = lambda p: os.path.join(dest,
692 res = lambda p: os.path.join(dest,
693 os.path.basename(util.localpath(p)))
693 os.path.basename(util.localpath(p)))
694 else:
694 else:
695 res = lambda p: dest
695 res = lambda p: dest
696 return res
696 return res
697
697
698 pats = scmutil.expandpats(pats)
698 pats = scmutil.expandpats(pats)
699 if not pats:
699 if not pats:
700 raise util.Abort(_('no source or destination specified'))
700 raise util.Abort(_('no source or destination specified'))
701 if len(pats) == 1:
701 if len(pats) == 1:
702 raise util.Abort(_('no destination specified'))
702 raise util.Abort(_('no destination specified'))
703 dest = pats.pop()
703 dest = pats.pop()
704 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
704 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
705 if not destdirexists:
705 if not destdirexists:
706 if len(pats) > 1 or matchmod.patkind(pats[0]):
706 if len(pats) > 1 or matchmod.patkind(pats[0]):
707 raise util.Abort(_('with multiple sources, destination must be an '
707 raise util.Abort(_('with multiple sources, destination must be an '
708 'existing directory'))
708 'existing directory'))
709 if util.endswithsep(dest):
709 if util.endswithsep(dest):
710 raise util.Abort(_('destination %s is not a directory') % dest)
710 raise util.Abort(_('destination %s is not a directory') % dest)
711
711
712 tfn = targetpathfn
712 tfn = targetpathfn
713 if after:
713 if after:
714 tfn = targetpathafterfn
714 tfn = targetpathafterfn
715 copylist = []
715 copylist = []
716 for pat in pats:
716 for pat in pats:
717 srcs = walkpat(pat)
717 srcs = walkpat(pat)
718 if not srcs:
718 if not srcs:
719 continue
719 continue
720 copylist.append((tfn(pat, dest, srcs), srcs))
720 copylist.append((tfn(pat, dest, srcs), srcs))
721 if not copylist:
721 if not copylist:
722 raise util.Abort(_('no files to copy'))
722 raise util.Abort(_('no files to copy'))
723
723
724 errors = 0
724 errors = 0
725 for targetpath, srcs in copylist:
725 for targetpath, srcs in copylist:
726 for abssrc, relsrc, exact in srcs:
726 for abssrc, relsrc, exact in srcs:
727 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
727 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
728 errors += 1
728 errors += 1
729
729
730 if errors:
730 if errors:
731 ui.warn(_('(consider using --after)\n'))
731 ui.warn(_('(consider using --after)\n'))
732
732
733 return errors != 0
733 return errors != 0
734
734
735 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
735 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
736 runargs=None, appendpid=False):
736 runargs=None, appendpid=False):
737 '''Run a command as a service.'''
737 '''Run a command as a service.'''
738
738
739 def writepid(pid):
739 def writepid(pid):
740 if opts['pid_file']:
740 if opts['pid_file']:
741 if appendpid:
741 if appendpid:
742 mode = 'a'
742 mode = 'a'
743 else:
743 else:
744 mode = 'w'
744 mode = 'w'
745 fp = open(opts['pid_file'], mode)
745 fp = open(opts['pid_file'], mode)
746 fp.write(str(pid) + '\n')
746 fp.write(str(pid) + '\n')
747 fp.close()
747 fp.close()
748
748
749 if opts['daemon'] and not opts['daemon_pipefds']:
749 if opts['daemon'] and not opts['daemon_pipefds']:
750 # Signal child process startup with file removal
750 # Signal child process startup with file removal
751 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
751 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
752 os.close(lockfd)
752 os.close(lockfd)
753 try:
753 try:
754 if not runargs:
754 if not runargs:
755 runargs = util.hgcmd() + sys.argv[1:]
755 runargs = util.hgcmd() + sys.argv[1:]
756 runargs.append('--daemon-pipefds=%s' % lockpath)
756 runargs.append('--daemon-pipefds=%s' % lockpath)
757 # Don't pass --cwd to the child process, because we've already
757 # Don't pass --cwd to the child process, because we've already
758 # changed directory.
758 # changed directory.
759 for i in xrange(1, len(runargs)):
759 for i in xrange(1, len(runargs)):
760 if runargs[i].startswith('--cwd='):
760 if runargs[i].startswith('--cwd='):
761 del runargs[i]
761 del runargs[i]
762 break
762 break
763 elif runargs[i].startswith('--cwd'):
763 elif runargs[i].startswith('--cwd'):
764 del runargs[i:i + 2]
764 del runargs[i:i + 2]
765 break
765 break
766 def condfn():
766 def condfn():
767 return not os.path.exists(lockpath)
767 return not os.path.exists(lockpath)
768 pid = util.rundetached(runargs, condfn)
768 pid = util.rundetached(runargs, condfn)
769 if pid < 0:
769 if pid < 0:
770 raise util.Abort(_('child process failed to start'))
770 raise util.Abort(_('child process failed to start'))
771 writepid(pid)
771 writepid(pid)
772 finally:
772 finally:
773 try:
773 try:
774 os.unlink(lockpath)
774 os.unlink(lockpath)
775 except OSError, e:
775 except OSError, e:
776 if e.errno != errno.ENOENT:
776 if e.errno != errno.ENOENT:
777 raise
777 raise
778 if parentfn:
778 if parentfn:
779 return parentfn(pid)
779 return parentfn(pid)
780 else:
780 else:
781 return
781 return
782
782
783 if initfn:
783 if initfn:
784 initfn()
784 initfn()
785
785
786 if not opts['daemon']:
786 if not opts['daemon']:
787 writepid(os.getpid())
787 writepid(os.getpid())
788
788
789 if opts['daemon_pipefds']:
789 if opts['daemon_pipefds']:
790 lockpath = opts['daemon_pipefds']
790 lockpath = opts['daemon_pipefds']
791 try:
791 try:
792 os.setsid()
792 os.setsid()
793 except AttributeError:
793 except AttributeError:
794 pass
794 pass
795 os.unlink(lockpath)
795 os.unlink(lockpath)
796 util.hidewindow()
796 util.hidewindow()
797 sys.stdout.flush()
797 sys.stdout.flush()
798 sys.stderr.flush()
798 sys.stderr.flush()
799
799
800 nullfd = os.open(os.devnull, os.O_RDWR)
800 nullfd = os.open(os.devnull, os.O_RDWR)
801 logfilefd = nullfd
801 logfilefd = nullfd
802 if logfile:
802 if logfile:
803 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
803 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
804 os.dup2(nullfd, 0)
804 os.dup2(nullfd, 0)
805 os.dup2(logfilefd, 1)
805 os.dup2(logfilefd, 1)
806 os.dup2(logfilefd, 2)
806 os.dup2(logfilefd, 2)
807 if nullfd not in (0, 1, 2):
807 if nullfd not in (0, 1, 2):
808 os.close(nullfd)
808 os.close(nullfd)
809 if logfile and logfilefd not in (0, 1, 2):
809 if logfile and logfilefd not in (0, 1, 2):
810 os.close(logfilefd)
810 os.close(logfilefd)
811
811
812 if runfn:
812 if runfn:
813 return runfn()
813 return runfn()
814
814
815 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
815 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
816 """Utility function used by commands.import to import a single patch
816 """Utility function used by commands.import to import a single patch
817
817
818 This function is explicitly defined here to help the evolve extension to
818 This function is explicitly defined here to help the evolve extension to
819 wrap this part of the import logic.
819 wrap this part of the import logic.
820
820
821 The API is currently a bit ugly because it is a simple code translation from
821 The API is currently a bit ugly because it is a simple code translation from
822 the import command. Feel free to make it better.
822 the import command. Feel free to make it better.
823
823
824 :hunk: a patch (as a binary string)
824 :hunk: a patch (as a binary string)
825 :parents: nodes that will be parent of the created commit
825 :parents: nodes that will be parent of the created commit
826 :opts: the full dict of options passed to the import command
826 :opts: the full dict of options passed to the import command
827 :msgs: list to save commit message to.
827 :msgs: list to save commit message to.
828 (used in case we need to save it when failing)
828 (used in case we need to save it when failing)
829 :updatefunc: a function that updates a repo to a given node
829 :updatefunc: a function that updates a repo to a given node
830 updatefunc(<repo>, <node>)
830 updatefunc(<repo>, <node>)
831 """
831 """
832 tmpname, message, user, date, branch, nodeid, p1, p2 = \
832 tmpname, message, user, date, branch, nodeid, p1, p2 = \
833 patch.extract(ui, hunk)
833 patch.extract(ui, hunk)
834
834
835 update = not opts.get('bypass')
835 update = not opts.get('bypass')
836 strip = opts["strip"]
836 strip = opts["strip"]
837 prefix = opts["prefix"]
837 prefix = opts["prefix"]
838 sim = float(opts.get('similarity') or 0)
838 sim = float(opts.get('similarity') or 0)
839 if not tmpname:
839 if not tmpname:
840 return (None, None, False)
840 return (None, None, False)
841 msg = _('applied to working directory')
841 msg = _('applied to working directory')
842
842
843 rejects = False
843 rejects = False
844 dsguard = None
844 dsguard = None
845
845
846 try:
846 try:
847 cmdline_message = logmessage(ui, opts)
847 cmdline_message = logmessage(ui, opts)
848 if cmdline_message:
848 if cmdline_message:
849 # pickup the cmdline msg
849 # pickup the cmdline msg
850 message = cmdline_message
850 message = cmdline_message
851 elif message:
851 elif message:
852 # pickup the patch msg
852 # pickup the patch msg
853 message = message.strip()
853 message = message.strip()
854 else:
854 else:
855 # launch the editor
855 # launch the editor
856 message = None
856 message = None
857 ui.debug('message:\n%s\n' % message)
857 ui.debug('message:\n%s\n' % message)
858
858
859 if len(parents) == 1:
859 if len(parents) == 1:
860 parents.append(repo[nullid])
860 parents.append(repo[nullid])
861 if opts.get('exact'):
861 if opts.get('exact'):
862 if not nodeid or not p1:
862 if not nodeid or not p1:
863 raise util.Abort(_('not a Mercurial patch'))
863 raise util.Abort(_('not a Mercurial patch'))
864 p1 = repo[p1]
864 p1 = repo[p1]
865 p2 = repo[p2 or nullid]
865 p2 = repo[p2 or nullid]
866 elif p2:
866 elif p2:
867 try:
867 try:
868 p1 = repo[p1]
868 p1 = repo[p1]
869 p2 = repo[p2]
869 p2 = repo[p2]
870 # Without any options, consider p2 only if the
870 # Without any options, consider p2 only if the
871 # patch is being applied on top of the recorded
871 # patch is being applied on top of the recorded
872 # first parent.
872 # first parent.
873 if p1 != parents[0]:
873 if p1 != parents[0]:
874 p1 = parents[0]
874 p1 = parents[0]
875 p2 = repo[nullid]
875 p2 = repo[nullid]
876 except error.RepoError:
876 except error.RepoError:
877 p1, p2 = parents
877 p1, p2 = parents
878 if p2.node() == nullid:
878 if p2.node() == nullid:
879 ui.warn(_("warning: import the patch as a normal revision\n"
879 ui.warn(_("warning: import the patch as a normal revision\n"
880 "(use --exact to import the patch as a merge)\n"))
880 "(use --exact to import the patch as a merge)\n"))
881 else:
881 else:
882 p1, p2 = parents
882 p1, p2 = parents
883
883
884 n = None
884 n = None
885 if update:
885 if update:
886 dsguard = dirstateguard(repo, 'tryimportone')
886 dsguard = dirstateguard(repo, 'tryimportone')
887 if p1 != parents[0]:
887 if p1 != parents[0]:
888 updatefunc(repo, p1.node())
888 updatefunc(repo, p1.node())
889 if p2 != parents[1]:
889 if p2 != parents[1]:
890 repo.setparents(p1.node(), p2.node())
890 repo.setparents(p1.node(), p2.node())
891
891
892 if opts.get('exact') or opts.get('import_branch'):
892 if opts.get('exact') or opts.get('import_branch'):
893 repo.dirstate.setbranch(branch or 'default')
893 repo.dirstate.setbranch(branch or 'default')
894
894
895 partial = opts.get('partial', False)
895 partial = opts.get('partial', False)
896 files = set()
896 files = set()
897 try:
897 try:
898 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
898 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
899 files=files, eolmode=None, similarity=sim / 100.0)
899 files=files, eolmode=None, similarity=sim / 100.0)
900 except patch.PatchError, e:
900 except patch.PatchError, e:
901 if not partial:
901 if not partial:
902 raise util.Abort(str(e))
902 raise util.Abort(str(e))
903 if partial:
903 if partial:
904 rejects = True
904 rejects = True
905
905
906 files = list(files)
906 files = list(files)
907 if opts.get('no_commit'):
907 if opts.get('no_commit'):
908 if message:
908 if message:
909 msgs.append(message)
909 msgs.append(message)
910 else:
910 else:
911 if opts.get('exact') or p2:
911 if opts.get('exact') or p2:
912 # If you got here, you either use --force and know what
912 # If you got here, you either use --force and know what
913 # you are doing or used --exact or a merge patch while
913 # you are doing or used --exact or a merge patch while
914 # being updated to its first parent.
914 # being updated to its first parent.
915 m = None
915 m = None
916 else:
916 else:
917 m = scmutil.matchfiles(repo, files or [])
917 m = scmutil.matchfiles(repo, files or [])
918 editform = mergeeditform(repo[None], 'import.normal')
918 editform = mergeeditform(repo[None], 'import.normal')
919 if opts.get('exact'):
919 if opts.get('exact'):
920 editor = None
920 editor = None
921 else:
921 else:
922 editor = getcommiteditor(editform=editform, **opts)
922 editor = getcommiteditor(editform=editform, **opts)
923 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
923 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
924 try:
924 try:
925 if partial:
925 if partial:
926 repo.ui.setconfig('ui', 'allowemptycommit', True)
926 repo.ui.setconfig('ui', 'allowemptycommit', True)
927 n = repo.commit(message, opts.get('user') or user,
927 n = repo.commit(message, opts.get('user') or user,
928 opts.get('date') or date, match=m,
928 opts.get('date') or date, match=m,
929 editor=editor)
929 editor=editor)
930 finally:
930 finally:
931 repo.ui.restoreconfig(allowemptyback)
931 repo.ui.restoreconfig(allowemptyback)
932 dsguard.close()
932 dsguard.close()
933 else:
933 else:
934 if opts.get('exact') or opts.get('import_branch'):
934 if opts.get('exact') or opts.get('import_branch'):
935 branch = branch or 'default'
935 branch = branch or 'default'
936 else:
936 else:
937 branch = p1.branch()
937 branch = p1.branch()
938 store = patch.filestore()
938 store = patch.filestore()
939 try:
939 try:
940 files = set()
940 files = set()
941 try:
941 try:
942 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
942 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
943 files, eolmode=None)
943 files, eolmode=None)
944 except patch.PatchError, e:
944 except patch.PatchError, e:
945 raise util.Abort(str(e))
945 raise util.Abort(str(e))
946 if opts.get('exact'):
946 if opts.get('exact'):
947 editor = None
947 editor = None
948 else:
948 else:
949 editor = getcommiteditor(editform='import.bypass')
949 editor = getcommiteditor(editform='import.bypass')
950 memctx = context.makememctx(repo, (p1.node(), p2.node()),
950 memctx = context.makememctx(repo, (p1.node(), p2.node()),
951 message,
951 message,
952 opts.get('user') or user,
952 opts.get('user') or user,
953 opts.get('date') or date,
953 opts.get('date') or date,
954 branch, files, store,
954 branch, files, store,
955 editor=editor)
955 editor=editor)
956 n = memctx.commit()
956 n = memctx.commit()
957 finally:
957 finally:
958 store.close()
958 store.close()
959 if opts.get('exact') and opts.get('no_commit'):
959 if opts.get('exact') and opts.get('no_commit'):
960 # --exact with --no-commit is still useful in that it applies the merge
960 # --exact with --no-commit is still useful in that it applies the merge
961 # and branch bits
961 # and branch bits
962 ui.warn(_("warning: can't check exact import with --no-commit\n"))
962 ui.warn(_("warning: can't check exact import with --no-commit\n"))
963 elif opts.get('exact') and hex(n) != nodeid:
963 elif opts.get('exact') and hex(n) != nodeid:
964 raise util.Abort(_('patch is damaged or loses information'))
964 raise util.Abort(_('patch is damaged or loses information'))
965 if n:
965 if n:
966 # i18n: refers to a short changeset id
966 # i18n: refers to a short changeset id
967 msg = _('created %s') % short(n)
967 msg = _('created %s') % short(n)
968 return (msg, n, rejects)
968 return (msg, n, rejects)
969 finally:
969 finally:
970 lockmod.release(dsguard)
970 lockmod.release(dsguard)
971 os.unlink(tmpname)
971 os.unlink(tmpname)
972
972
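The commit path above temporarily enables ui.allowemptycommit so that a --partial import whose hunks were all rejected can still be recorded. A minimal sketch of that backup/override/restore idiom, using only the ui methods already called in this block (the repository path is a placeholder):

    from mercurial import ui as uimod, hg
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')   # placeholder path
    backup = repo.ui.backupconfig('ui', 'allowemptycommit')
    try:
        repo.ui.setconfig('ui', 'allowemptycommit', True)
        # ... perform a commit that may be empty ...
    finally:
        repo.ui.restoreconfig(backup)
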
973 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
973 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
974 opts=None):
974 opts=None):
975 '''export changesets as hg patches.'''
975 '''export changesets as hg patches.'''
976
976
977 total = len(revs)
977 total = len(revs)
978 revwidth = max([len(str(rev)) for rev in revs])
978 revwidth = max([len(str(rev)) for rev in revs])
979 filemode = {}
979 filemode = {}
980
980
981 def single(rev, seqno, fp):
981 def single(rev, seqno, fp):
982 ctx = repo[rev]
982 ctx = repo[rev]
983 node = ctx.node()
983 node = ctx.node()
984 parents = [p.node() for p in ctx.parents() if p]
984 parents = [p.node() for p in ctx.parents() if p]
985 branch = ctx.branch()
985 branch = ctx.branch()
986 if switch_parent:
986 if switch_parent:
987 parents.reverse()
987 parents.reverse()
988
988
989 if parents:
989 if parents:
990 prev = parents[0]
990 prev = parents[0]
991 else:
991 else:
992 prev = nullid
992 prev = nullid
993
993
994 shouldclose = False
994 shouldclose = False
995 if not fp and len(template) > 0:
995 if not fp and len(template) > 0:
996 desc_lines = ctx.description().rstrip().split('\n')
996 desc_lines = ctx.description().rstrip().split('\n')
997 desc = desc_lines[0]  # Commit always has a first line.
997 desc = desc_lines[0]  # Commit always has a first line.
998 fp = makefileobj(repo, template, node, desc=desc, total=total,
998 fp = makefileobj(repo, template, node, desc=desc, total=total,
999 seqno=seqno, revwidth=revwidth, mode='wb',
999 seqno=seqno, revwidth=revwidth, mode='wb',
1000 modemap=filemode)
1000 modemap=filemode)
1001 if fp != template:
1001 if fp != template:
1002 shouldclose = True
1002 shouldclose = True
1003 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
1003 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
1004 repo.ui.note("%s\n" % fp.name)
1004 repo.ui.note("%s\n" % fp.name)
1005
1005
1006 if not fp:
1006 if not fp:
1007 write = repo.ui.write
1007 write = repo.ui.write
1008 else:
1008 else:
1009 def write(s, **kw):
1009 def write(s, **kw):
1010 fp.write(s)
1010 fp.write(s)
1011
1011
1012 write("# HG changeset patch\n")
1012 write("# HG changeset patch\n")
1013 write("# User %s\n" % ctx.user())
1013 write("# User %s\n" % ctx.user())
1014 write("# Date %d %d\n" % ctx.date())
1014 write("# Date %d %d\n" % ctx.date())
1015 write("# %s\n" % util.datestr(ctx.date()))
1015 write("# %s\n" % util.datestr(ctx.date()))
1016 if branch and branch != 'default':
1016 if branch and branch != 'default':
1017 write("# Branch %s\n" % branch)
1017 write("# Branch %s\n" % branch)
1018 write("# Node ID %s\n" % hex(node))
1018 write("# Node ID %s\n" % hex(node))
1019 write("# Parent %s\n" % hex(prev))
1019 write("# Parent %s\n" % hex(prev))
1020 if len(parents) > 1:
1020 if len(parents) > 1:
1021 write("# Parent %s\n" % hex(parents[1]))
1021 write("# Parent %s\n" % hex(parents[1]))
1022 write(ctx.description().rstrip())
1022 write(ctx.description().rstrip())
1023 write("\n\n")
1023 write("\n\n")
1024
1024
1025 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
1025 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
1026 write(chunk, label=label)
1026 write(chunk, label=label)
1027
1027
1028 if shouldclose:
1028 if shouldclose:
1029 fp.close()
1029 fp.close()
1030
1030
1031 for seqno, rev in enumerate(revs):
1031 for seqno, rev in enumerate(revs):
1032 single(rev, seqno + 1, fp)
1032 single(rev, seqno + 1, fp)
1033
1033
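export() above writes one patch per revision; when no file object is passed, makefileobj() expands %-escapes such as %h (short hash) in the name template. A hedged usage sketch (repository path and revision numbers are placeholders):

    from mercurial import ui as uimod, hg, cmdutil
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')   # placeholder path
    cmdutil.export(repo, [10, 11])             # writes hg-<short hash>.patch per rev
    fp = open('all.patch', 'wb')               # or stream both patches to one file
    try:
        cmdutil.export(repo, [10, 11], fp=fp)
    finally:
        fp.close()
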
1034 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1034 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1035 changes=None, stat=False, fp=None, prefix='',
1035 changes=None, stat=False, fp=None, prefix='',
1036 root='', listsubrepos=False):
1036 root='', listsubrepos=False):
1037 '''show diff or diffstat.'''
1037 '''show diff or diffstat.'''
1038 if fp is None:
1038 if fp is None:
1039 write = ui.write
1039 write = ui.write
1040 else:
1040 else:
1041 def write(s, **kw):
1041 def write(s, **kw):
1042 fp.write(s)
1042 fp.write(s)
1043
1043
1044 if root:
1044 if root:
1045 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1045 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1046 else:
1046 else:
1047 relroot = ''
1047 relroot = ''
1048 if relroot != '':
1048 if relroot != '':
1049 # XXX relative roots currently don't work if the root is within a
1049 # XXX relative roots currently don't work if the root is within a
1050 # subrepo
1050 # subrepo
1051 uirelroot = match.uipath(relroot)
1051 uirelroot = match.uipath(relroot)
1052 relroot += '/'
1052 relroot += '/'
1053 for matchroot in match.files():
1053 for matchroot in match.files():
1054 if not matchroot.startswith(relroot):
1054 if not matchroot.startswith(relroot):
1055 ui.warn(_('warning: %s not inside relative root %s\n') % (
1055 ui.warn(_('warning: %s not inside relative root %s\n') % (
1056 match.uipath(matchroot), uirelroot))
1056 match.uipath(matchroot), uirelroot))
1057
1057
1058 if stat:
1058 if stat:
1059 diffopts = diffopts.copy(context=0)
1059 diffopts = diffopts.copy(context=0)
1060 width = 80
1060 width = 80
1061 if not ui.plain():
1061 if not ui.plain():
1062 width = ui.termwidth()
1062 width = ui.termwidth()
1063 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1063 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1064 prefix=prefix, relroot=relroot)
1064 prefix=prefix, relroot=relroot)
1065 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1065 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1066 width=width,
1066 width=width,
1067 git=diffopts.git):
1067 git=diffopts.git):
1068 write(chunk, label=label)
1068 write(chunk, label=label)
1069 else:
1069 else:
1070 for chunk, label in patch.diffui(repo, node1, node2, match,
1070 for chunk, label in patch.diffui(repo, node1, node2, match,
1071 changes, diffopts, prefix=prefix,
1071 changes, diffopts, prefix=prefix,
1072 relroot=relroot):
1072 relroot=relroot):
1073 write(chunk, label=label)
1073 write(chunk, label=label)
1074
1074
1075 if listsubrepos:
1075 if listsubrepos:
1076 ctx1 = repo[node1]
1076 ctx1 = repo[node1]
1077 ctx2 = repo[node2]
1077 ctx2 = repo[node2]
1078 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1078 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1079 tempnode2 = node2
1079 tempnode2 = node2
1080 try:
1080 try:
1081 if node2 is not None:
1081 if node2 is not None:
1082 tempnode2 = ctx2.substate[subpath][1]
1082 tempnode2 = ctx2.substate[subpath][1]
1083 except KeyError:
1083 except KeyError:
1084 # A subrepo that existed in node1 was deleted between node1 and
1084 # A subrepo that existed in node1 was deleted between node1 and
1085 # node2 (inclusive). Thus, ctx2's substate won't contain that
1085 # node2 (inclusive). Thus, ctx2's substate won't contain that
1086 # subpath. The best we can do is to ignore it.
1086 # subpath. The best we can do is to ignore it.
1087 tempnode2 = None
1087 tempnode2 = None
1088 submatch = matchmod.narrowmatcher(subpath, match)
1088 submatch = matchmod.narrowmatcher(subpath, match)
1089 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1089 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1090 stat=stat, fp=fp, prefix=prefix)
1090 stat=stat, fp=fp, prefix=prefix)
1091
1091
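diffordiffstat() above is the shared renderer for plain diffs and --stat summaries: with stat=True the same chunks are piped through patch.diffstatui() at the terminal width. A small sketch of calling it directly (path and revisions are placeholders):

    from mercurial import ui as uimod, hg, cmdutil, scmutil, patch
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')          # placeholder path
    m = scmutil.matchall(repo)
    diffopts = patch.diffallopts(u)
    node1, node2 = repo[10].node(), repo[11].node()   # placeholder revisions
    cmdutil.diffordiffstat(u, repo, diffopts, node1, node2, m)             # full diff
    cmdutil.diffordiffstat(u, repo, diffopts, node1, node2, m, stat=True)  # diffstat
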
1092 class changeset_printer(object):
1092 class changeset_printer(object):
1093 '''show changeset information when templating not requested.'''
1093 '''show changeset information when templating not requested.'''
1094
1094
1095 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1095 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1096 self.ui = ui
1096 self.ui = ui
1097 self.repo = repo
1097 self.repo = repo
1098 self.buffered = buffered
1098 self.buffered = buffered
1099 self.matchfn = matchfn
1099 self.matchfn = matchfn
1100 self.diffopts = diffopts
1100 self.diffopts = diffopts
1101 self.header = {}
1101 self.header = {}
1102 self.hunk = {}
1102 self.hunk = {}
1103 self.lastheader = None
1103 self.lastheader = None
1104 self.footer = None
1104 self.footer = None
1105
1105
1106 def flush(self, rev):
1106 def flush(self, rev):
1107 if rev in self.header:
1107 if rev in self.header:
1108 h = self.header[rev]
1108 h = self.header[rev]
1109 if h != self.lastheader:
1109 if h != self.lastheader:
1110 self.lastheader = h
1110 self.lastheader = h
1111 self.ui.write(h)
1111 self.ui.write(h)
1112 del self.header[rev]
1112 del self.header[rev]
1113 if rev in self.hunk:
1113 if rev in self.hunk:
1114 self.ui.write(self.hunk[rev])
1114 self.ui.write(self.hunk[rev])
1115 del self.hunk[rev]
1115 del self.hunk[rev]
1116 return 1
1116 return 1
1117 return 0
1117 return 0
1118
1118
1119 def close(self):
1119 def close(self):
1120 if self.footer:
1120 if self.footer:
1121 self.ui.write(self.footer)
1121 self.ui.write(self.footer)
1122
1122
1123 def show(self, ctx, copies=None, matchfn=None, **props):
1123 def show(self, ctx, copies=None, matchfn=None, **props):
1124 if self.buffered:
1124 if self.buffered:
1125 self.ui.pushbuffer()
1125 self.ui.pushbuffer()
1126 self._show(ctx, copies, matchfn, props)
1126 self._show(ctx, copies, matchfn, props)
1127 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1127 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1128 else:
1128 else:
1129 self._show(ctx, copies, matchfn, props)
1129 self._show(ctx, copies, matchfn, props)
1130
1130
1131 def _show(self, ctx, copies, matchfn, props):
1131 def _show(self, ctx, copies, matchfn, props):
1132 '''show a single changeset or file revision'''
1132 '''show a single changeset or file revision'''
1133 changenode = ctx.node()
1133 changenode = ctx.node()
1134 rev = ctx.rev()
1134 rev = ctx.rev()
1135 if self.ui.debugflag:
1135 if self.ui.debugflag:
1136 hexfunc = hex
1136 hexfunc = hex
1137 else:
1137 else:
1138 hexfunc = short
1138 hexfunc = short
1139 if rev is None:
1139 if rev is None:
1140 pctx = ctx.p1()
1140 pctx = ctx.p1()
1141 revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
1141 revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
1142 else:
1142 else:
1143 revnode = (rev, hexfunc(changenode))
1143 revnode = (rev, hexfunc(changenode))
1144
1144
1145 if self.ui.quiet:
1145 if self.ui.quiet:
1146 self.ui.write("%d:%s\n" % revnode, label='log.node')
1146 self.ui.write("%d:%s\n" % revnode, label='log.node')
1147 return
1147 return
1148
1148
1149 date = util.datestr(ctx.date())
1149 date = util.datestr(ctx.date())
1150
1150
1151 # i18n: column positioning for "hg log"
1151 # i18n: column positioning for "hg log"
1152 self.ui.write(_("changeset: %d:%s\n") % revnode,
1152 self.ui.write(_("changeset: %d:%s\n") % revnode,
1153 label='log.changeset changeset.%s' % ctx.phasestr())
1153 label='log.changeset changeset.%s' % ctx.phasestr())
1154
1154
1155 # branches are shown first, before any other names, due to backwards
1155 # branches are shown first, before any other names, due to backwards
1156 # compatibility
1156 # compatibility
1157 branch = ctx.branch()
1157 branch = ctx.branch()
1158 # don't show the default branch name
1158 # don't show the default branch name
1159 if branch != 'default':
1159 if branch != 'default':
1160 # i18n: column positioning for "hg log"
1160 # i18n: column positioning for "hg log"
1161 self.ui.write(_("branch: %s\n") % branch,
1161 self.ui.write(_("branch: %s\n") % branch,
1162 label='log.branch')
1162 label='log.branch')
1163
1163
1164 for name, ns in self.repo.names.iteritems():
1164 for name, ns in self.repo.names.iteritems():
1165 # the 'branches' namespace has special logic already handled above, so
1165 # the 'branches' namespace has special logic already handled above, so
1166 # here we just skip it
1166 # here we just skip it
1167 if name == 'branches':
1167 if name == 'branches':
1168 continue
1168 continue
1169 # we will use the templatename as the color name since those two
1169 # we will use the templatename as the color name since those two
1170 # should be the same
1170 # should be the same
1171 for name in ns.names(self.repo, changenode):
1171 for name in ns.names(self.repo, changenode):
1172 self.ui.write(ns.logfmt % name,
1172 self.ui.write(ns.logfmt % name,
1173 label='log.%s' % ns.colorname)
1173 label='log.%s' % ns.colorname)
1174 if self.ui.debugflag:
1174 if self.ui.debugflag:
1175 # i18n: column positioning for "hg log"
1175 # i18n: column positioning for "hg log"
1176 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1176 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1177 label='log.phase')
1177 label='log.phase')
1178 for pctx in self._meaningful_parentrevs(ctx):
1178 for pctx in self._meaningful_parentrevs(ctx):
1179 label = 'log.parent changeset.%s' % pctx.phasestr()
1179 label = 'log.parent changeset.%s' % pctx.phasestr()
1180 # i18n: column positioning for "hg log"
1180 # i18n: column positioning for "hg log"
1181 self.ui.write(_("parent: %d:%s\n")
1181 self.ui.write(_("parent: %d:%s\n")
1182 % (pctx.rev(), hexfunc(pctx.node())),
1182 % (pctx.rev(), hexfunc(pctx.node())),
1183 label=label)
1183 label=label)
1184
1184
1185 if self.ui.debugflag and rev is not None:
1185 if self.ui.debugflag and rev is not None:
1186 mnode = ctx.manifestnode()
1186 mnode = ctx.manifestnode()
1187 # i18n: column positioning for "hg log"
1187 # i18n: column positioning for "hg log"
1188 self.ui.write(_("manifest: %d:%s\n") %
1188 self.ui.write(_("manifest: %d:%s\n") %
1189 (self.repo.manifest.rev(mnode), hex(mnode)),
1189 (self.repo.manifest.rev(mnode), hex(mnode)),
1190 label='ui.debug log.manifest')
1190 label='ui.debug log.manifest')
1191 # i18n: column positioning for "hg log"
1191 # i18n: column positioning for "hg log"
1192 self.ui.write(_("user: %s\n") % ctx.user(),
1192 self.ui.write(_("user: %s\n") % ctx.user(),
1193 label='log.user')
1193 label='log.user')
1194 # i18n: column positioning for "hg log"
1194 # i18n: column positioning for "hg log"
1195 self.ui.write(_("date: %s\n") % date,
1195 self.ui.write(_("date: %s\n") % date,
1196 label='log.date')
1196 label='log.date')
1197
1197
1198 if self.ui.debugflag:
1198 if self.ui.debugflag:
1199 files = ctx.p1().status(ctx)[:3]
1199 files = ctx.p1().status(ctx)[:3]
1200 for key, value in zip([# i18n: column positioning for "hg log"
1200 for key, value in zip([# i18n: column positioning for "hg log"
1201 _("files:"),
1201 _("files:"),
1202 # i18n: column positioning for "hg log"
1202 # i18n: column positioning for "hg log"
1203 _("files+:"),
1203 _("files+:"),
1204 # i18n: column positioning for "hg log"
1204 # i18n: column positioning for "hg log"
1205 _("files-:")], files):
1205 _("files-:")], files):
1206 if value:
1206 if value:
1207 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1207 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1208 label='ui.debug log.files')
1208 label='ui.debug log.files')
1209 elif ctx.files() and self.ui.verbose:
1209 elif ctx.files() and self.ui.verbose:
1210 # i18n: column positioning for "hg log"
1210 # i18n: column positioning for "hg log"
1211 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1211 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1212 label='ui.note log.files')
1212 label='ui.note log.files')
1213 if copies and self.ui.verbose:
1213 if copies and self.ui.verbose:
1214 copies = ['%s (%s)' % c for c in copies]
1214 copies = ['%s (%s)' % c for c in copies]
1215 # i18n: column positioning for "hg log"
1215 # i18n: column positioning for "hg log"
1216 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1216 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1217 label='ui.note log.copies')
1217 label='ui.note log.copies')
1218
1218
1219 extra = ctx.extra()
1219 extra = ctx.extra()
1220 if extra and self.ui.debugflag:
1220 if extra and self.ui.debugflag:
1221 for key, value in sorted(extra.items()):
1221 for key, value in sorted(extra.items()):
1222 # i18n: column positioning for "hg log"
1222 # i18n: column positioning for "hg log"
1223 self.ui.write(_("extra: %s=%s\n")
1223 self.ui.write(_("extra: %s=%s\n")
1224 % (key, value.encode('string_escape')),
1224 % (key, value.encode('string_escape')),
1225 label='ui.debug log.extra')
1225 label='ui.debug log.extra')
1226
1226
1227 description = ctx.description().strip()
1227 description = ctx.description().strip()
1228 if description:
1228 if description:
1229 if self.ui.verbose:
1229 if self.ui.verbose:
1230 self.ui.write(_("description:\n"),
1230 self.ui.write(_("description:\n"),
1231 label='ui.note log.description')
1231 label='ui.note log.description')
1232 self.ui.write(description,
1232 self.ui.write(description,
1233 label='ui.note log.description')
1233 label='ui.note log.description')
1234 self.ui.write("\n\n")
1234 self.ui.write("\n\n")
1235 else:
1235 else:
1236 # i18n: column positioning for "hg log"
1236 # i18n: column positioning for "hg log"
1237 self.ui.write(_("summary: %s\n") %
1237 self.ui.write(_("summary: %s\n") %
1238 description.splitlines()[0],
1238 description.splitlines()[0],
1239 label='log.summary')
1239 label='log.summary')
1240 self.ui.write("\n")
1240 self.ui.write("\n")
1241
1241
1242 self.showpatch(changenode, matchfn)
1242 self.showpatch(changenode, matchfn)
1243
1243
1244 def showpatch(self, node, matchfn):
1244 def showpatch(self, node, matchfn):
1245 if not matchfn:
1245 if not matchfn:
1246 matchfn = self.matchfn
1246 matchfn = self.matchfn
1247 if matchfn:
1247 if matchfn:
1248 stat = self.diffopts.get('stat')
1248 stat = self.diffopts.get('stat')
1249 diff = self.diffopts.get('patch')
1249 diff = self.diffopts.get('patch')
1250 diffopts = patch.diffallopts(self.ui, self.diffopts)
1250 diffopts = patch.diffallopts(self.ui, self.diffopts)
1251 prev = self.repo.changelog.parents(node)[0]
1251 prev = self.repo.changelog.parents(node)[0]
1252 if stat:
1252 if stat:
1253 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1253 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1254 match=matchfn, stat=True)
1254 match=matchfn, stat=True)
1255 if diff:
1255 if diff:
1256 if stat:
1256 if stat:
1257 self.ui.write("\n")
1257 self.ui.write("\n")
1258 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1258 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1259 match=matchfn, stat=False)
1259 match=matchfn, stat=False)
1260 self.ui.write("\n")
1260 self.ui.write("\n")
1261
1261
1262 def _meaningful_parentrevs(self, ctx):
1262 def _meaningful_parentrevs(self, ctx):
1263 """Return list of meaningful (or all if debug) parentrevs for rev.
1263 """Return list of meaningful (or all if debug) parentrevs for rev.
1264
1264
1265 For merges (two non-nullrev revisions) both parents are meaningful.
1265 For merges (two non-nullrev revisions) both parents are meaningful.
1266 Otherwise the first parent revision is considered meaningful if it
1266 Otherwise the first parent revision is considered meaningful if it
1267 is not the preceding revision.
1267 is not the preceding revision.
1268 """
1268 """
1269 parents = ctx.parents()
1269 parents = ctx.parents()
1270 if len(parents) > 1:
1270 if len(parents) > 1:
1271 return parents
1271 return parents
1272 if self.ui.debugflag:
1272 if self.ui.debugflag:
1273 return [parents[0], self.repo['null']]
1273 return [parents[0], self.repo['null']]
1274 if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
1274 if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
1275 return []
1275 return []
1276 return parents
1276 return parents
1277
1277
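changeset_printer above is what `hg log` uses when no template or style is configured; the header/hunk dicts exist so graph-aware callers can buffer per-revision output and flush it in their own order. A minimal sketch of that buffered mode (path is a placeholder):

    from mercurial import ui as uimod, hg, cmdutil
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')   # placeholder path
    displayer = cmdutil.show_changeset(u, repo, {}, buffered=True)
    displayer.show(repo['tip'])                # buffered into displayer.hunk
    displayer.flush(repo['tip'].rev())         # now emit the buffered hunk
    displayer.close()
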
1278 class jsonchangeset(changeset_printer):
1278 class jsonchangeset(changeset_printer):
1279 '''format changeset information.'''
1279 '''format changeset information.'''
1280
1280
1281 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1281 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1282 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1282 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1283 self.cache = {}
1283 self.cache = {}
1284 self._first = True
1284 self._first = True
1285
1285
1286 def close(self):
1286 def close(self):
1287 if not self._first:
1287 if not self._first:
1288 self.ui.write("\n]\n")
1288 self.ui.write("\n]\n")
1289 else:
1289 else:
1290 self.ui.write("[]\n")
1290 self.ui.write("[]\n")
1291
1291
1292 def _show(self, ctx, copies, matchfn, props):
1292 def _show(self, ctx, copies, matchfn, props):
1293 '''show a single changeset or file revision'''
1293 '''show a single changeset or file revision'''
1294 rev = ctx.rev()
1294 rev = ctx.rev()
1295 if rev is None:
1295 if rev is None:
1296 jrev = jnode = 'null'
1296 jrev = jnode = 'null'
1297 else:
1297 else:
1298 jrev = str(rev)
1298 jrev = str(rev)
1299 jnode = '"%s"' % hex(ctx.node())
1299 jnode = '"%s"' % hex(ctx.node())
1300 j = encoding.jsonescape
1300 j = encoding.jsonescape
1301
1301
1302 if self._first:
1302 if self._first:
1303 self.ui.write("[\n {")
1303 self.ui.write("[\n {")
1304 self._first = False
1304 self._first = False
1305 else:
1305 else:
1306 self.ui.write(",\n {")
1306 self.ui.write(",\n {")
1307
1307
1308 if self.ui.quiet:
1308 if self.ui.quiet:
1309 self.ui.write('\n "rev": %s' % jrev)
1309 self.ui.write('\n "rev": %s' % jrev)
1310 self.ui.write(',\n "node": %s' % jnode)
1310 self.ui.write(',\n "node": %s' % jnode)
1311 self.ui.write('\n }')
1311 self.ui.write('\n }')
1312 return
1312 return
1313
1313
1314 self.ui.write('\n "rev": %s' % jrev)
1314 self.ui.write('\n "rev": %s' % jrev)
1315 self.ui.write(',\n "node": %s' % jnode)
1315 self.ui.write(',\n "node": %s' % jnode)
1316 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1316 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1317 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1317 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1318 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1318 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1319 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1319 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1320 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1320 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1321
1321
1322 self.ui.write(',\n "bookmarks": [%s]' %
1322 self.ui.write(',\n "bookmarks": [%s]' %
1323 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1323 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1324 self.ui.write(',\n "tags": [%s]' %
1324 self.ui.write(',\n "tags": [%s]' %
1325 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1325 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1326 self.ui.write(',\n "parents": [%s]' %
1326 self.ui.write(',\n "parents": [%s]' %
1327 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1327 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1328
1328
1329 if self.ui.debugflag:
1329 if self.ui.debugflag:
1330 if rev is None:
1330 if rev is None:
1331 jmanifestnode = 'null'
1331 jmanifestnode = 'null'
1332 else:
1332 else:
1333 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1333 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1334 self.ui.write(',\n "manifest": %s' % jmanifestnode)
1334 self.ui.write(',\n "manifest": %s' % jmanifestnode)
1335
1335
1336 self.ui.write(',\n "extra": {%s}' %
1336 self.ui.write(',\n "extra": {%s}' %
1337 ", ".join('"%s": "%s"' % (j(k), j(v))
1337 ", ".join('"%s": "%s"' % (j(k), j(v))
1338 for k, v in ctx.extra().items()))
1338 for k, v in ctx.extra().items()))
1339
1339
1340 files = ctx.p1().status(ctx)
1340 files = ctx.p1().status(ctx)
1341 self.ui.write(',\n "modified": [%s]' %
1341 self.ui.write(',\n "modified": [%s]' %
1342 ", ".join('"%s"' % j(f) for f in files[0]))
1342 ", ".join('"%s"' % j(f) for f in files[0]))
1343 self.ui.write(',\n "added": [%s]' %
1343 self.ui.write(',\n "added": [%s]' %
1344 ", ".join('"%s"' % j(f) for f in files[1]))
1344 ", ".join('"%s"' % j(f) for f in files[1]))
1345 self.ui.write(',\n "removed": [%s]' %
1345 self.ui.write(',\n "removed": [%s]' %
1346 ", ".join('"%s"' % j(f) for f in files[2]))
1346 ", ".join('"%s"' % j(f) for f in files[2]))
1347
1347
1348 elif self.ui.verbose:
1348 elif self.ui.verbose:
1349 self.ui.write(',\n "files": [%s]' %
1349 self.ui.write(',\n "files": [%s]' %
1350 ", ".join('"%s"' % j(f) for f in ctx.files()))
1350 ", ".join('"%s"' % j(f) for f in ctx.files()))
1351
1351
1352 if copies:
1352 if copies:
1353 self.ui.write(',\n "copies": {%s}' %
1353 self.ui.write(',\n "copies": {%s}' %
1354 ", ".join('"%s": "%s"' % (j(k), j(v))
1354 ", ".join('"%s": "%s"' % (j(k), j(v))
1355 for k, v in copies))
1355 for k, v in copies))
1356
1356
1357 matchfn = self.matchfn
1357 matchfn = self.matchfn
1358 if matchfn:
1358 if matchfn:
1359 stat = self.diffopts.get('stat')
1359 stat = self.diffopts.get('stat')
1360 diff = self.diffopts.get('patch')
1360 diff = self.diffopts.get('patch')
1361 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1361 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1362 node, prev = ctx.node(), ctx.p1().node()
1362 node, prev = ctx.node(), ctx.p1().node()
1363 if stat:
1363 if stat:
1364 self.ui.pushbuffer()
1364 self.ui.pushbuffer()
1365 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1365 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1366 match=matchfn, stat=True)
1366 match=matchfn, stat=True)
1367 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1367 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1368 if diff:
1368 if diff:
1369 self.ui.pushbuffer()
1369 self.ui.pushbuffer()
1370 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1370 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1371 match=matchfn, stat=False)
1371 match=matchfn, stat=False)
1372 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1372 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1373
1373
1374 self.ui.write("\n }")
1374 self.ui.write("\n }")
1375
1375
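The JSON printer above assembles its output by hand, so the field order in _show() is the field order on the wire. It is selected through show_changeset() below via the reserved template name 'json'; a sketch of the call and of the rough shape it emits (values are placeholders):

    from mercurial import ui as uimod, hg, cmdutil
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')   # placeholder path
    displayer = cmdutil.show_changeset(u, repo, {'template': 'json'})
    displayer.show(repo['tip'])
    displayer.close()   # emits the closing "]\n"
    # output, roughly:
    # [
    #  {
    #   "rev": 42, "node": "...", "branch": "default", "phase": "draft",
    #   "user": "...", "date": [1430000000, 0], "desc": "...",
    #   "bookmarks": [], "tags": ["tip"], "parents": ["..."]
    #  }
    # ]
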
1376 class changeset_templater(changeset_printer):
1376 class changeset_templater(changeset_printer):
1377 '''format changeset information.'''
1377 '''format changeset information.'''
1378
1378
1379 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1379 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1380 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1380 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1381 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1381 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1382 defaulttempl = {
1382 defaulttempl = {
1383 'parent': '{rev}:{node|formatnode} ',
1383 'parent': '{rev}:{node|formatnode} ',
1384 'manifest': '{rev}:{node|formatnode}',
1384 'manifest': '{rev}:{node|formatnode}',
1385 'file_copy': '{name} ({source})',
1385 'file_copy': '{name} ({source})',
1386 'extra': '{key}={value|stringescape}'
1386 'extra': '{key}={value|stringescape}'
1387 }
1387 }
1388 # filecopy is preserved for compatibility reasons
1388 # filecopy is preserved for compatibility reasons
1389 defaulttempl['filecopy'] = defaulttempl['file_copy']
1389 defaulttempl['filecopy'] = defaulttempl['file_copy']
1390 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1390 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1391 cache=defaulttempl)
1391 cache=defaulttempl)
1392 if tmpl:
1392 if tmpl:
1393 self.t.cache['changeset'] = tmpl
1393 self.t.cache['changeset'] = tmpl
1394
1394
1395 self.cache = {}
1395 self.cache = {}
1396
1396
1397 def _show(self, ctx, copies, matchfn, props):
1397 def _show(self, ctx, copies, matchfn, props):
1398 '''show a single changeset or file revision'''
1398 '''show a single changeset or file revision'''
1399
1399
1400 showlist = templatekw.showlist
1400 showlist = templatekw.showlist
1401
1401
1402 # showparents() behaviour depends on the ui trace level, which
1402 # showparents() behaviour depends on the ui trace level, which
1403 # causes unexpected behaviour at the templating level and makes
1403 # causes unexpected behaviour at the templating level and makes
1404 # it harder to extract into a standalone function. Its
1404 # it harder to extract into a standalone function. Its
1405 # behaviour cannot be changed, so leave it here for now.
1405 # behaviour cannot be changed, so leave it here for now.
1406 def showparents(**args):
1406 def showparents(**args):
1407 ctx = args['ctx']
1407 ctx = args['ctx']
1408 parents = [[('rev', p.rev()),
1408 parents = [[('rev', p.rev()),
1409 ('node', p.hex()),
1409 ('node', p.hex()),
1410 ('phase', p.phasestr())]
1410 ('phase', p.phasestr())]
1411 for p in self._meaningful_parentrevs(ctx)]
1411 for p in self._meaningful_parentrevs(ctx)]
1412 return showlist('parent', parents, **args)
1412 return showlist('parent', parents, **args)
1413
1413
1414 props = props.copy()
1414 props = props.copy()
1415 props.update(templatekw.keywords)
1415 props.update(templatekw.keywords)
1416 props['parents'] = showparents
1416 props['parents'] = showparents
1417 props['templ'] = self.t
1417 props['templ'] = self.t
1418 props['ctx'] = ctx
1418 props['ctx'] = ctx
1419 props['repo'] = self.repo
1419 props['repo'] = self.repo
1420 props['revcache'] = {'copies': copies}
1420 props['revcache'] = {'copies': copies}
1421 props['cache'] = self.cache
1421 props['cache'] = self.cache
1422
1422
1423 # find correct templates for current mode
1423 # find correct templates for current mode
1424
1424
1425 tmplmodes = [
1425 tmplmodes = [
1426 (True, None),
1426 (True, None),
1427 (self.ui.verbose, 'verbose'),
1427 (self.ui.verbose, 'verbose'),
1428 (self.ui.quiet, 'quiet'),
1428 (self.ui.quiet, 'quiet'),
1429 (self.ui.debugflag, 'debug'),
1429 (self.ui.debugflag, 'debug'),
1430 ]
1430 ]
1431
1431
1432 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1432 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1433 for mode, postfix in tmplmodes:
1433 for mode, postfix in tmplmodes:
1434 for type in types:
1434 for type in types:
1435 cur = postfix and ('%s_%s' % (type, postfix)) or type
1435 cur = postfix and ('%s_%s' % (type, postfix)) or type
1436 if mode and cur in self.t:
1436 if mode and cur in self.t:
1437 types[type] = cur
1437 types[type] = cur
1438
1438
1439 try:
1439 try:
1440
1440
1441 # write header
1441 # write header
1442 if types['header']:
1442 if types['header']:
1443 h = templater.stringify(self.t(types['header'], **props))
1443 h = templater.stringify(self.t(types['header'], **props))
1444 if self.buffered:
1444 if self.buffered:
1445 self.header[ctx.rev()] = h
1445 self.header[ctx.rev()] = h
1446 else:
1446 else:
1447 if self.lastheader != h:
1447 if self.lastheader != h:
1448 self.lastheader = h
1448 self.lastheader = h
1449 self.ui.write(h)
1449 self.ui.write(h)
1450
1450
1451 # write changeset metadata, then patch if requested
1451 # write changeset metadata, then patch if requested
1452 key = types['changeset']
1452 key = types['changeset']
1453 self.ui.write(templater.stringify(self.t(key, **props)))
1453 self.ui.write(templater.stringify(self.t(key, **props)))
1454 self.showpatch(ctx.node(), matchfn)
1454 self.showpatch(ctx.node(), matchfn)
1455
1455
1456 if types['footer']:
1456 if types['footer']:
1457 if not self.footer:
1457 if not self.footer:
1458 self.footer = templater.stringify(self.t(types['footer'],
1458 self.footer = templater.stringify(self.t(types['footer'],
1459 **props))
1459 **props))
1460
1460
1461 except KeyError, inst:
1461 except KeyError, inst:
1462 msg = _("%s: no key named '%s'")
1462 msg = _("%s: no key named '%s'")
1463 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1463 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1464 except SyntaxError, inst:
1464 except SyntaxError, inst:
1465 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1465 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1466
1466
1467 def gettemplate(ui, tmpl, style):
1467 def gettemplate(ui, tmpl, style):
1468 """
1468 """
1469 Find the template matching the given template spec or style.
1469 Find the template matching the given template spec or style.
1470 """
1470 """
1471
1471
1472 # ui settings
1472 # ui settings
1473 if not tmpl and not style: # templates are stronger than styles
1473 if not tmpl and not style: # templates are stronger than styles
1474 tmpl = ui.config('ui', 'logtemplate')
1474 tmpl = ui.config('ui', 'logtemplate')
1475 if tmpl:
1475 if tmpl:
1476 try:
1476 try:
1477 tmpl = templater.unquotestring(tmpl)
1477 tmpl = templater.unquotestring(tmpl)
1478 except SyntaxError:
1478 except SyntaxError:
1479 pass
1479 pass
1480 return tmpl, None
1480 return tmpl, None
1481 else:
1481 else:
1482 style = util.expandpath(ui.config('ui', 'style', ''))
1482 style = util.expandpath(ui.config('ui', 'style', ''))
1483
1483
1484 if not tmpl and style:
1484 if not tmpl and style:
1485 mapfile = style
1485 mapfile = style
1486 if not os.path.split(mapfile)[0]:
1486 if not os.path.split(mapfile)[0]:
1487 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1487 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1488 or templater.templatepath(mapfile))
1488 or templater.templatepath(mapfile))
1489 if mapname:
1489 if mapname:
1490 mapfile = mapname
1490 mapfile = mapname
1491 return None, mapfile
1491 return None, mapfile
1492
1492
1493 if not tmpl:
1493 if not tmpl:
1494 return None, None
1494 return None, None
1495
1495
1496 # looks like a literal template?
1496 # looks like a literal template?
1497 if '{' in tmpl:
1497 if '{' in tmpl:
1498 return tmpl, None
1498 return tmpl, None
1499
1499
1500 # perhaps a stock style?
1500 # perhaps a stock style?
1501 if not os.path.split(tmpl)[0]:
1501 if not os.path.split(tmpl)[0]:
1502 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1502 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1503 or templater.templatepath(tmpl))
1503 or templater.templatepath(tmpl))
1504 if mapname and os.path.isfile(mapname):
1504 if mapname and os.path.isfile(mapname):
1505 return None, mapname
1505 return None, mapname
1506
1506
1507 # perhaps it's a reference to [templates]
1507 # perhaps it's a reference to [templates]
1508 t = ui.config('templates', tmpl)
1508 t = ui.config('templates', tmpl)
1509 if t:
1509 if t:
1510 try:
1510 try:
1511 tmpl = templater.unquotestring(t)
1511 tmpl = templater.unquotestring(t)
1512 except SyntaxError:
1512 except SyntaxError:
1513 tmpl = t
1513 tmpl = t
1514 return tmpl, None
1514 return tmpl, None
1515
1515
1516 if tmpl == 'list':
1516 if tmpl == 'list':
1517 ui.write(_("available styles: %s\n") % templater.stylelist())
1517 ui.write(_("available styles: %s\n") % templater.stylelist())
1518 raise util.Abort(_("specify a template"))
1518 raise util.Abort(_("specify a template"))
1519
1519
1520 # perhaps it's a path to a map or a template
1520 # perhaps it's a path to a map or a template
1521 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1521 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1522 # is it a mapfile for a style?
1522 # is it a mapfile for a style?
1523 if os.path.basename(tmpl).startswith("map-"):
1523 if os.path.basename(tmpl).startswith("map-"):
1524 return None, os.path.realpath(tmpl)
1524 return None, os.path.realpath(tmpl)
1525 tmpl = open(tmpl).read()
1525 tmpl = open(tmpl).read()
1526 return tmpl, None
1526 return tmpl, None
1527
1527
1528 # constant string?
1528 # constant string?
1529 return tmpl, None
1529 return tmpl, None
1530
1530
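gettemplate() above always returns a (tmpl, mapfile) pair with at most one element set; which one is set tells the caller whether it got a literal template or a style map file. A few illustrative calls whose results follow the branches above ('xml' is assumed to be a stock style shipped with Mercurial):

    from mercurial import ui as uimod, cmdutil
    u = uimod.ui()
    cmdutil.gettemplate(u, '{rev}\n', None)  # ('{rev}\n', None): contains '{', literal template
    cmdutil.gettemplate(u, 'xml', None)      # (None, '.../map-cmdline.xml'): stock style mapfile
    cmdutil.gettemplate(u, None, None)       # (None, None) unless [ui] logtemplate/style is set
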
1531 def show_changeset(ui, repo, opts, buffered=False):
1531 def show_changeset(ui, repo, opts, buffered=False):
1532 """show one changeset using template or regular display.
1532 """show one changeset using template or regular display.
1533
1533
1534 Display format will be the first non-empty hit of:
1534 Display format will be the first non-empty hit of:
1535 1. option 'template'
1535 1. option 'template'
1536 2. option 'style'
1536 2. option 'style'
1537 3. [ui] setting 'logtemplate'
1537 3. [ui] setting 'logtemplate'
1538 4. [ui] setting 'style'
1538 4. [ui] setting 'style'
1539 If all of these values are either unset or the empty string,
1539 If all of these values are either unset or the empty string,
1540 regular display via changeset_printer() is done.
1540 regular display via changeset_printer() is done.
1541 """
1541 """
1542 # options
1542 # options
1543 matchfn = None
1543 matchfn = None
1544 if opts.get('patch') or opts.get('stat'):
1544 if opts.get('patch') or opts.get('stat'):
1545 matchfn = scmutil.matchall(repo)
1545 matchfn = scmutil.matchall(repo)
1546
1546
1547 if opts.get('template') == 'json':
1547 if opts.get('template') == 'json':
1548 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1548 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1549
1549
1550 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1550 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1551
1551
1552 if not tmpl and not mapfile:
1552 if not tmpl and not mapfile:
1553 return changeset_printer(ui, repo, matchfn, opts, buffered)
1553 return changeset_printer(ui, repo, matchfn, opts, buffered)
1554
1554
1555 try:
1555 try:
1556 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1556 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1557 buffered)
1557 buffered)
1558 except SyntaxError, inst:
1558 except SyntaxError, inst:
1559 raise util.Abort(inst.args[0])
1559 raise util.Abort(inst.args[0])
1560 return t
1560 return t
1561
1561
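show_changeset() is the factory the log-like commands use: it resolves the template/style precedence documented above and hands back a printer whose show()/close() methods do the writing. A hedged sketch with a literal template (path is a placeholder):

    from mercurial import ui as uimod, hg, cmdutil
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')   # placeholder path
    displayer = cmdutil.show_changeset(
        u, repo, {'template': '{rev}:{node|short} {desc|firstline}\n'})
    for rev in repo.revs('last(all(), 3)'):    # the three highest revisions
        displayer.show(repo[rev])
    displayer.close()
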
1562 def showmarker(ui, marker):
1562 def showmarker(ui, marker):
1563 """utility function to display obsolescence marker in a readable way
1563 """utility function to display obsolescence marker in a readable way
1564
1564
1565 To be used by debug function."""
1565 To be used by debug function."""
1566 ui.write(hex(marker.precnode()))
1566 ui.write(hex(marker.precnode()))
1567 for repl in marker.succnodes():
1567 for repl in marker.succnodes():
1568 ui.write(' ')
1568 ui.write(' ')
1569 ui.write(hex(repl))
1569 ui.write(hex(repl))
1570 ui.write(' %X ' % marker.flags())
1570 ui.write(' %X ' % marker.flags())
1571 parents = marker.parentnodes()
1571 parents = marker.parentnodes()
1572 if parents is not None:
1572 if parents is not None:
1573 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1573 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1574 ui.write('(%s) ' % util.datestr(marker.date()))
1574 ui.write('(%s) ' % util.datestr(marker.date()))
1575 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1575 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1576 sorted(marker.metadata().items())
1576 sorted(marker.metadata().items())
1577 if t[0] != 'date')))
1577 if t[0] != 'date')))
1578 ui.write('\n')
1578 ui.write('\n')
1579
1579
1580 def finddate(ui, repo, date):
1580 def finddate(ui, repo, date):
1581 """Find the tipmost changeset that matches the given date spec"""
1581 """Find the tipmost changeset that matches the given date spec"""
1582
1582
1583 df = util.matchdate(date)
1583 df = util.matchdate(date)
1584 m = scmutil.matchall(repo)
1584 m = scmutil.matchall(repo)
1585 results = {}
1585 results = {}
1586
1586
1587 def prep(ctx, fns):
1587 def prep(ctx, fns):
1588 d = ctx.date()
1588 d = ctx.date()
1589 if df(d[0]):
1589 if df(d[0]):
1590 results[ctx.rev()] = d
1590 results[ctx.rev()] = d
1591
1591
1592 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1592 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1593 rev = ctx.rev()
1593 rev = ctx.rev()
1594 if rev in results:
1594 if rev in results:
1595 ui.status(_("found revision %s from %s\n") %
1595 ui.status(_("found revision %s from %s\n") %
1596 (rev, util.datestr(results[rev])))
1596 (rev, util.datestr(results[rev])))
1597 return str(rev)
1597 return str(rev)
1598
1598
1599 raise util.Abort(_("revision matching date not found"))
1599 raise util.Abort(_("revision matching date not found"))
1600
1600
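finddate() above returns, as a string, the highest revision whose commit date matches the spec (util.matchdate accepts the same forms as `hg log --date`), and aborts if nothing matches. A hedged sketch (path and date are placeholders):

    from mercurial import ui as uimod, hg, cmdutil
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')       # placeholder path
    rev = cmdutil.finddate(u, repo, '2015-05-01')  # tipmost changeset committed that day
    u.write('%s\n' % repo[rev])
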
1601 def increasingwindows(windowsize=8, sizelimit=512):
1601 def increasingwindows(windowsize=8, sizelimit=512):
1602 while True:
1602 while True:
1603 yield windowsize
1603 yield windowsize
1604 if windowsize < sizelimit:
1604 if windowsize < sizelimit:
1605 windowsize *= 2
1605 windowsize *= 2
1606
1606
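increasingwindows() above feeds the windowed iteration in walkchangerevs() further down: the window size doubles from 8 until it reaches the 512 cap and then stays there. A quick illustration:

    from itertools import islice
    from mercurial import cmdutil
    print list(islice(cmdutil.increasingwindows(), 9))
    # [8, 16, 32, 64, 128, 256, 512, 512, 512]
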
1607 class FileWalkError(Exception):
1607 class FileWalkError(Exception):
1608 pass
1608 pass
1609
1609
1610 def walkfilerevs(repo, match, follow, revs, fncache):
1610 def walkfilerevs(repo, match, follow, revs, fncache):
1611 '''Walks the file history for the matched files.
1611 '''Walks the file history for the matched files.
1612
1612
1613 Returns the changeset revs that are involved in the file history.
1613 Returns the changeset revs that are involved in the file history.
1614
1614
1615 Throws FileWalkError if the file history can't be walked using
1615 Throws FileWalkError if the file history can't be walked using
1616 filelogs alone.
1616 filelogs alone.
1617 '''
1617 '''
1618 wanted = set()
1618 wanted = set()
1619 copies = []
1619 copies = []
1620 minrev, maxrev = min(revs), max(revs)
1620 minrev, maxrev = min(revs), max(revs)
1621 def filerevgen(filelog, last):
1621 def filerevgen(filelog, last):
1622 """
1622 """
1623 Only files, no patterns. Check the history of each file.
1623 Only files, no patterns. Check the history of each file.
1624
1624
1625 Examines filelog entries within the minrev..maxrev linkrev range.
1625 Examines filelog entries within the minrev..maxrev linkrev range.
1626 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1626 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1627 tuples in backwards order
1627 tuples in backwards order
1628 """
1628 """
1629 cl_count = len(repo)
1629 cl_count = len(repo)
1630 revs = []
1630 revs = []
1631 for j in xrange(0, last + 1):
1631 for j in xrange(0, last + 1):
1632 linkrev = filelog.linkrev(j)
1632 linkrev = filelog.linkrev(j)
1633 if linkrev < minrev:
1633 if linkrev < minrev:
1634 continue
1634 continue
1635 # only yield revs for which we have the changelog; this can
1635 # only yield revs for which we have the changelog; this can
1636 # happen while doing "hg log" during a pull or commit
1636 # happen while doing "hg log" during a pull or commit
1637 if linkrev >= cl_count:
1637 if linkrev >= cl_count:
1638 break
1638 break
1639
1639
1640 parentlinkrevs = []
1640 parentlinkrevs = []
1641 for p in filelog.parentrevs(j):
1641 for p in filelog.parentrevs(j):
1642 if p != nullrev:
1642 if p != nullrev:
1643 parentlinkrevs.append(filelog.linkrev(p))
1643 parentlinkrevs.append(filelog.linkrev(p))
1644 n = filelog.node(j)
1644 n = filelog.node(j)
1645 revs.append((linkrev, parentlinkrevs,
1645 revs.append((linkrev, parentlinkrevs,
1646 follow and filelog.renamed(n)))
1646 follow and filelog.renamed(n)))
1647
1647
1648 return reversed(revs)
1648 return reversed(revs)
1649 def iterfiles():
1649 def iterfiles():
1650 pctx = repo['.']
1650 pctx = repo['.']
1651 for filename in match.files():
1651 for filename in match.files():
1652 if follow:
1652 if follow:
1653 if filename not in pctx:
1653 if filename not in pctx:
1654 raise util.Abort(_('cannot follow file not in parent '
1654 raise util.Abort(_('cannot follow file not in parent '
1655 'revision: "%s"') % filename)
1655 'revision: "%s"') % filename)
1656 yield filename, pctx[filename].filenode()
1656 yield filename, pctx[filename].filenode()
1657 else:
1657 else:
1658 yield filename, None
1658 yield filename, None
1659 for filename_node in copies:
1659 for filename_node in copies:
1660 yield filename_node
1660 yield filename_node
1661
1661
1662 for file_, node in iterfiles():
1662 for file_, node in iterfiles():
1663 filelog = repo.file(file_)
1663 filelog = repo.file(file_)
1664 if not len(filelog):
1664 if not len(filelog):
1665 if node is None:
1665 if node is None:
1666 # A zero count may be a directory or deleted file, so
1666 # A zero count may be a directory or deleted file, so
1667 # try to find matching entries on the slow path.
1667 # try to find matching entries on the slow path.
1668 if follow:
1668 if follow:
1669 raise util.Abort(
1669 raise util.Abort(
1670 _('cannot follow nonexistent file: "%s"') % file_)
1670 _('cannot follow nonexistent file: "%s"') % file_)
1671 raise FileWalkError("Cannot walk via filelog")
1671 raise FileWalkError("Cannot walk via filelog")
1672 else:
1672 else:
1673 continue
1673 continue
1674
1674
1675 if node is None:
1675 if node is None:
1676 last = len(filelog) - 1
1676 last = len(filelog) - 1
1677 else:
1677 else:
1678 last = filelog.rev(node)
1678 last = filelog.rev(node)
1679
1679
1680 # keep track of all ancestors of the file
1680 # keep track of all ancestors of the file
1681 ancestors = set([filelog.linkrev(last)])
1681 ancestors = set([filelog.linkrev(last)])
1682
1682
1683 # iterate from latest to oldest revision
1683 # iterate from latest to oldest revision
1684 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1684 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1685 if not follow:
1685 if not follow:
1686 if rev > maxrev:
1686 if rev > maxrev:
1687 continue
1687 continue
1688 else:
1688 else:
1689 # Note that last might not be the first interesting
1689 # Note that last might not be the first interesting
1690 # rev to us:
1690 # rev to us:
1691 # if the file has been changed after maxrev, we'll
1691 # if the file has been changed after maxrev, we'll
1692 # have linkrev(last) > maxrev, and we still need
1692 # have linkrev(last) > maxrev, and we still need
1693 # to explore the file graph
1693 # to explore the file graph
1694 if rev not in ancestors:
1694 if rev not in ancestors:
1695 continue
1695 continue
1696 # XXX insert 1327 fix here
1696 # XXX insert 1327 fix here
1697 if flparentlinkrevs:
1697 if flparentlinkrevs:
1698 ancestors.update(flparentlinkrevs)
1698 ancestors.update(flparentlinkrevs)
1699
1699
1700 fncache.setdefault(rev, []).append(file_)
1700 fncache.setdefault(rev, []).append(file_)
1701 wanted.add(rev)
1701 wanted.add(rev)
1702 if copied:
1702 if copied:
1703 copies.append(copied)
1703 copies.append(copied)
1704
1704
1705 return wanted
1705 return wanted
1706
1706
1707 class _followfilter(object):
1707 class _followfilter(object):
1708 def __init__(self, repo, onlyfirst=False):
1708 def __init__(self, repo, onlyfirst=False):
1709 self.repo = repo
1709 self.repo = repo
1710 self.startrev = nullrev
1710 self.startrev = nullrev
1711 self.roots = set()
1711 self.roots = set()
1712 self.onlyfirst = onlyfirst
1712 self.onlyfirst = onlyfirst
1713
1713
1714 def match(self, rev):
1714 def match(self, rev):
1715 def realparents(rev):
1715 def realparents(rev):
1716 if self.onlyfirst:
1716 if self.onlyfirst:
1717 return self.repo.changelog.parentrevs(rev)[0:1]
1717 return self.repo.changelog.parentrevs(rev)[0:1]
1718 else:
1718 else:
1719 return filter(lambda x: x != nullrev,
1719 return filter(lambda x: x != nullrev,
1720 self.repo.changelog.parentrevs(rev))
1720 self.repo.changelog.parentrevs(rev))
1721
1721
1722 if self.startrev == nullrev:
1722 if self.startrev == nullrev:
1723 self.startrev = rev
1723 self.startrev = rev
1724 return True
1724 return True
1725
1725
1726 if rev > self.startrev:
1726 if rev > self.startrev:
1727 # forward: all descendants
1727 # forward: all descendants
1728 if not self.roots:
1728 if not self.roots:
1729 self.roots.add(self.startrev)
1729 self.roots.add(self.startrev)
1730 for parent in realparents(rev):
1730 for parent in realparents(rev):
1731 if parent in self.roots:
1731 if parent in self.roots:
1732 self.roots.add(rev)
1732 self.roots.add(rev)
1733 return True
1733 return True
1734 else:
1734 else:
1735 # backwards: all parents
1735 # backwards: all parents
1736 if not self.roots:
1736 if not self.roots:
1737 self.roots.update(realparents(self.startrev))
1737 self.roots.update(realparents(self.startrev))
1738 if rev in self.roots:
1738 if rev in self.roots:
1739 self.roots.remove(rev)
1739 self.roots.remove(rev)
1740 self.roots.update(realparents(rev))
1740 self.roots.update(realparents(rev))
1741 return True
1741 return True
1742
1742
1743 return False
1743 return False
1744
1744
1745 def walkchangerevs(repo, match, opts, prepare):
1745 def walkchangerevs(repo, match, opts, prepare):
1746 '''Iterate over files and the revs in which they changed.
1746 '''Iterate over files and the revs in which they changed.
1747
1747
1748 Callers most commonly need to iterate backwards over the history
1748 Callers most commonly need to iterate backwards over the history
1749 in which they are interested. Doing so has awful (quadratic-looking)
1749 in which they are interested. Doing so has awful (quadratic-looking)
1750 performance, so we use iterators in a "windowed" way.
1750 performance, so we use iterators in a "windowed" way.
1751
1751
1752 We walk a window of revisions in the desired order. Within the
1752 We walk a window of revisions in the desired order. Within the
1753 window, we first walk forwards to gather data, then in the desired
1753 window, we first walk forwards to gather data, then in the desired
1754 order (usually backwards) to display it.
1754 order (usually backwards) to display it.
1755
1755
1756 This function returns an iterator yielding contexts. Before
1756 This function returns an iterator yielding contexts. Before
1757 yielding each context, the iterator will first call the prepare
1757 yielding each context, the iterator will first call the prepare
1758 function on each context in the window in forward order.'''
1758 function on each context in the window in forward order.'''
1759
1759
1760 follow = opts.get('follow') or opts.get('follow_first')
1760 follow = opts.get('follow') or opts.get('follow_first')
1761 revs = _logrevs(repo, opts)
1761 revs = _logrevs(repo, opts)
1762 if not revs:
1762 if not revs:
1763 return []
1763 return []
1764 wanted = set()
1764 wanted = set()
1765 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1765 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1766 opts.get('removed'))
1766 opts.get('removed'))
1767 fncache = {}
1767 fncache = {}
1768 change = repo.changectx
1768 change = repo.changectx
1769
1769
1770 # First step is to fill wanted, the set of revisions that we want to yield.
1770 # First step is to fill wanted, the set of revisions that we want to yield.
1771 # When it does not induce extra cost, we also fill fncache for revisions in
1771 # When it does not induce extra cost, we also fill fncache for revisions in
1772 # wanted: a cache of filenames that were changed (ctx.files()) and that
1772 # wanted: a cache of filenames that were changed (ctx.files()) and that
1773 # match the file filtering conditions.
1773 # match the file filtering conditions.
1774
1774
1775 if match.always():
1775 if match.always():
1776 # No files, no patterns. Display all revs.
1776 # No files, no patterns. Display all revs.
1777 wanted = revs
1777 wanted = revs
1778 elif not slowpath:
1778 elif not slowpath:
1779 # We only have to read through the filelog to find wanted revisions
1779 # We only have to read through the filelog to find wanted revisions
1780
1780
1781 try:
1781 try:
1782 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1782 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1783 except FileWalkError:
1783 except FileWalkError:
1784 slowpath = True
1784 slowpath = True
1785
1785
1786 # We decided to fall back to the slowpath because at least one
1786 # We decided to fall back to the slowpath because at least one
1787 # of the paths was not a file. Check to see if at least one of them
1787 # of the paths was not a file. Check to see if at least one of them
1788 # existed in history; otherwise simply return
1788 # existed in history; otherwise simply return
1789 for path in match.files():
1789 for path in match.files():
1790 if path == '.' or path in repo.store:
1790 if path == '.' or path in repo.store:
1791 break
1791 break
1792 else:
1792 else:
1793 return []
1793 return []
1794
1794
1795 if slowpath:
1795 if slowpath:
1796 # We have to read the changelog to match filenames against
1796 # We have to read the changelog to match filenames against
1797 # changed files
1797 # changed files
1798
1798
1799 if follow:
1799 if follow:
1800 raise util.Abort(_('can only follow copies/renames for explicit '
1800 raise util.Abort(_('can only follow copies/renames for explicit '
1801 'filenames'))
1801 'filenames'))
1802
1802
1803 # The slow path checks files modified in every changeset.
1803 # The slow path checks files modified in every changeset.
1804 # This is really slow on large repos, so compute the set lazily.
1804 # This is really slow on large repos, so compute the set lazily.
1805 class lazywantedset(object):
1805 class lazywantedset(object):
1806 def __init__(self):
1806 def __init__(self):
1807 self.set = set()
1807 self.set = set()
1808 self.revs = set(revs)
1808 self.revs = set(revs)
1809
1809
1810 # No need to worry about locality here because it will be accessed
1810 # No need to worry about locality here because it will be accessed
1811 # in the same order as the increasing window below.
1811 # in the same order as the increasing window below.
1812 def __contains__(self, value):
1812 def __contains__(self, value):
1813 if value in self.set:
1813 if value in self.set:
1814 return True
1814 return True
1815 elif value not in self.revs:
1815 elif value not in self.revs:
1816 return False
1816 return False
1817 else:
1817 else:
1818 self.revs.discard(value)
1818 self.revs.discard(value)
1819 ctx = change(value)
1819 ctx = change(value)
1820 matches = filter(match, ctx.files())
1820 matches = filter(match, ctx.files())
1821 if matches:
1821 if matches:
1822 fncache[value] = matches
1822 fncache[value] = matches
1823 self.set.add(value)
1823 self.set.add(value)
1824 return True
1824 return True
1825 return False
1825 return False
1826
1826
1827 def discard(self, value):
1827 def discard(self, value):
1828 self.revs.discard(value)
1828 self.revs.discard(value)
1829 self.set.discard(value)
1829 self.set.discard(value)
1830
1830
1831 wanted = lazywantedset()
1831 wanted = lazywantedset()
1832
1832
1833 # it might be worthwhile to do this in the iterator if the rev range
1833 # it might be worthwhile to do this in the iterator if the rev range
1834 # is descending and the prune args are all within that range
1834 # is descending and the prune args are all within that range
1835 for rev in opts.get('prune', ()):
1835 for rev in opts.get('prune', ()):
1836 rev = repo[rev].rev()
1836 rev = repo[rev].rev()
1837 ff = _followfilter(repo)
1837 ff = _followfilter(repo)
1838 stop = min(revs[0], revs[-1])
1838 stop = min(revs[0], revs[-1])
1839 for x in xrange(rev, stop - 1, -1):
1839 for x in xrange(rev, stop - 1, -1):
1840 if ff.match(x):
1840 if ff.match(x):
1841 wanted = wanted - [x]
1841 wanted = wanted - [x]
1842
1842
1843 # Now that wanted is correctly initialized, we can iterate over the
1843 # Now that wanted is correctly initialized, we can iterate over the
1844 # revision range, yielding only revisions in wanted.
1844 # revision range, yielding only revisions in wanted.
1845 def iterate():
1845 def iterate():
1846 if follow and match.always():
1846 if follow and match.always():
1847 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1847 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1848 def want(rev):
1848 def want(rev):
1849 return ff.match(rev) and rev in wanted
1849 return ff.match(rev) and rev in wanted
1850 else:
1850 else:
1851 def want(rev):
1851 def want(rev):
1852 return rev in wanted
1852 return rev in wanted
1853
1853
1854 it = iter(revs)
1854 it = iter(revs)
1855 stopiteration = False
1855 stopiteration = False
1856 for windowsize in increasingwindows():
1856 for windowsize in increasingwindows():
1857 nrevs = []
1857 nrevs = []
1858 for i in xrange(windowsize):
1858 for i in xrange(windowsize):
1859 rev = next(it, None)
1859 rev = next(it, None)
1860 if rev is None:
1860 if rev is None:
1861 stopiteration = True
1861 stopiteration = True
1862 break
1862 break
1863 elif want(rev):
1863 elif want(rev):
1864 nrevs.append(rev)
1864 nrevs.append(rev)
1865 for rev in sorted(nrevs):
1865 for rev in sorted(nrevs):
1866 fns = fncache.get(rev)
1866 fns = fncache.get(rev)
1867 ctx = change(rev)
1867 ctx = change(rev)
1868 if not fns:
1868 if not fns:
1869 def fns_generator():
1869 def fns_generator():
1870 for f in ctx.files():
1870 for f in ctx.files():
1871 if match(f):
1871 if match(f):
1872 yield f
1872 yield f
1873 fns = fns_generator()
1873 fns = fns_generator()
1874 prepare(ctx, fns)
1874 prepare(ctx, fns)
1875 for rev in nrevs:
1875 for rev in nrevs:
1876 yield change(rev)
1876 yield change(rev)
1877
1877
1878 if stopiteration:
1878 if stopiteration:
1879 break
1879 break
1880
1880
1881 return iterate()
1881 return iterate()
1882
1882
1883 def _makefollowlogfilematcher(repo, files, followfirst):
1883 def _makefollowlogfilematcher(repo, files, followfirst):
1884 # When displaying a revision with --patch --follow FILE, we have
1884 # When displaying a revision with --patch --follow FILE, we have
1885 # to know which file of the revision must be diffed. With
1885 # to know which file of the revision must be diffed. With
1886 # --follow, we want the names of the ancestors of FILE in the
1886 # --follow, we want the names of the ancestors of FILE in the
1887 # revision, stored in "fcache". "fcache" is populated by
1887 # revision, stored in "fcache". "fcache" is populated by
1888 # reproducing the graph traversal already done by --follow revset
1888 # reproducing the graph traversal already done by --follow revset
1889 # and relating linkrevs to file names (which is not "correct" but
1889 # and relating linkrevs to file names (which is not "correct" but
1890 # good enough).
1890 # good enough).
1891 fcache = {}
1891 fcache = {}
1892 fcacheready = [False]
1892 fcacheready = [False]
1893 pctx = repo['.']
1893 pctx = repo['.']
1894
1894
1895 def populate():
1895 def populate():
1896 for fn in files:
1896 for fn in files:
1897 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1897 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1898 for c in i:
1898 for c in i:
1899 fcache.setdefault(c.linkrev(), set()).add(c.path())
1899 fcache.setdefault(c.linkrev(), set()).add(c.path())
1900
1900
1901 def filematcher(rev):
1901 def filematcher(rev):
1902 if not fcacheready[0]:
1902 if not fcacheready[0]:
1903 # Lazy initialization
1903 # Lazy initialization
1904 fcacheready[0] = True
1904 fcacheready[0] = True
1905 populate()
1905 populate()
1906 return scmutil.matchfiles(repo, fcache.get(rev, []))
1906 return scmutil.matchfiles(repo, fcache.get(rev, []))
1907
1907
1908 return filematcher
1908 return filematcher
1909
1909
1910 def _makenofollowlogfilematcher(repo, pats, opts):
1910 def _makenofollowlogfilematcher(repo, pats, opts):
1911 '''hook for extensions to override the filematcher for non-follow cases'''
1911 '''hook for extensions to override the filematcher for non-follow cases'''
1912 return None
1912 return None
1913
1913
1914 def _makelogrevset(repo, pats, opts, revs):
1914 def _makelogrevset(repo, pats, opts, revs):
1915 """Return (expr, filematcher) where expr is a revset string built
1915 """Return (expr, filematcher) where expr is a revset string built
1916 from log options and file patterns or None. If --stat or --patch
1916 from log options and file patterns or None. If --stat or --patch
1917 are not passed filematcher is None. Otherwise it is a callable
1917 are not passed filematcher is None. Otherwise it is a callable
1918 taking a revision number and returning a match objects filtering
1918 taking a revision number and returning a match objects filtering
1919 the files to be detailed when displaying the revision.
1919 the files to be detailed when displaying the revision.
1920 """
1920 """
1921 opt2revset = {
1921 opt2revset = {
1922 'no_merges': ('not merge()', None),
1922 'no_merges': ('not merge()', None),
1923 'only_merges': ('merge()', None),
1923 'only_merges': ('merge()', None),
1924 '_ancestors': ('ancestors(%(val)s)', None),
1924 '_ancestors': ('ancestors(%(val)s)', None),
1925 '_fancestors': ('_firstancestors(%(val)s)', None),
1925 '_fancestors': ('_firstancestors(%(val)s)', None),
1926 '_descendants': ('descendants(%(val)s)', None),
1926 '_descendants': ('descendants(%(val)s)', None),
1927 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1927 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1928 '_matchfiles': ('_matchfiles(%(val)s)', None),
1928 '_matchfiles': ('_matchfiles(%(val)s)', None),
1929 'date': ('date(%(val)r)', None),
1929 'date': ('date(%(val)r)', None),
1930 'branch': ('branch(%(val)r)', ' or '),
1930 'branch': ('branch(%(val)r)', ' or '),
1931 '_patslog': ('filelog(%(val)r)', ' or '),
1931 '_patslog': ('filelog(%(val)r)', ' or '),
1932 '_patsfollow': ('follow(%(val)r)', ' or '),
1932 '_patsfollow': ('follow(%(val)r)', ' or '),
1933 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1933 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1934 'keyword': ('keyword(%(val)r)', ' or '),
1934 'keyword': ('keyword(%(val)r)', ' or '),
1935 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1935 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1936 'user': ('user(%(val)r)', ' or '),
1936 'user': ('user(%(val)r)', ' or '),
1937 }
1937 }
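    # For illustration only (hypothetical input): with opts such as
    # {'user': ['alice'], 'keyword': ['bug']}, the loop near the end of this
    # function renders each option through the table above, OR-joins repeated
    # values of the same option, and AND-joins the per-option expressions,
    # yielding roughly "((keyword('bug')) and (user('alice')))".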

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher

def _logrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = revset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = revset.spanset(repo)
        revs.reverse()
    return revs

def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
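    # Illustrative caller sketch (hypothetical, not part of this API):
    #   revs, expr, filematcher = getlogrevs(repo, pats, opts)
    #   for rev in revs:
    #       fn = filematcher(rev) if filematcher else None
    #       displayer.show(repo[rev], matchfn=fn)
    # where 'displayer' would be a changeset displayer such as the one
    # returned by show_changeset().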
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()

def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)

def checkunsupportedgraphflags(pats, opts):
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))

def graphrevs(repo, nodes, opts):
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []
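    # remember paths the matcher flags as bad while still invoking the
    # original bad() callback so the usual warnings are emitted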
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def forget(ui, repo, match, prefix, explicitonly):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def files(ui, ctx, m, fm, fmt, subrepos):
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            return (m.always() or m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

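    # Select which tracked files to drop: everything with --force, only
    # already-deleted files with --after, otherwise deleted and clean files
    # (modified and added files are warned about instead).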
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret

def cat(ui, repo, ctx, matcher, prefix, **opts):
    err = 1

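    # helper: dump a single file's data to the destination derived from
    # --output (stdout by default), applying decode filters when --decode
    # is given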
    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err

def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)

def amend(ui, repo, commitfunc, old, extra, pats, opts):
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = dsguard = lock = newid = None
    try:
        wlock = repo.wlock()
        dsguard = dirstateguard(repo, 'amend')
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._activebookmark
            try:
                repo._activebookmark = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._activebookmark = activebookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                if old.p2:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

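                # a file can be dropped from 'files' when the intermediate
                # commit left it identical (same content and flags) to the
                # copy in base, or when it is missing from both manifests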
                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            # commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
            dsguard.close()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, dsguard, wlock)
    return newid

def commiteditor(repo, ctx, subs, editform=''):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs, editform=editform)

def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
2693 while forms:
2693 while forms:
2694 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2694 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2695 if tmpl:
2695 if tmpl:
2696 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2696 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2697 break
2697 break
2698 forms.pop()
2698 forms.pop()
2699 else:
2699 else:
2700 committext = buildcommittext(repo, ctx, subs, extramsg)
2700 committext = buildcommittext(repo, ctx, subs, extramsg)
2701
2701
2702 # run editor in the repository root
2702 # run editor in the repository root
2703 olddir = os.getcwd()
2703 olddir = os.getcwd()
2704 os.chdir(repo.root)
2704 os.chdir(repo.root)
2705 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2705 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2706 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2706 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2707 os.chdir(olddir)
2707 os.chdir(olddir)
2708
2708
2709 if finishdesc:
2709 if finishdesc:
2710 text = finishdesc(text)
2710 text = finishdesc(text)
2711 if not text.strip():
2711 if not text.strip():
2712 raise util.Abort(_("empty commit message"))
2712 raise util.Abort(_("empty commit message"))
2713
2713
2714 return text
2714 return text
2715
2715
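commitforceeditor above resolves the commit template by walking the dotted editform from its most specific form down to the bare 'changeset' key. Here is a self-contained sketch of that most-specific-first lookup; it uses a plain dict in place of repo.ui.config('committemplate', ...), and the template names are made up for the example.

    def lookuptemplate(templates, editform):
        # Try 'changeset.<full editform>', then progressively shorter
        # prefixes, and finally the bare 'changeset' key.
        forms = [e for e in editform.split('.') if e]
        forms.insert(0, 'changeset')
        while forms:
            key = '.'.join(forms)
            if key in templates:
                return templates[key]
            forms.pop()
        return None

    tmpls = {'changeset.commit.amend': 'amend text', 'changeset': 'default text'}
    assert lookuptemplate(tmpls, 'commit.amend.normal') == 'amend text'
    assert lookuptemplate(tmpls, 'tag') == 'default text'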
2716 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2716 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2717 ui = repo.ui
2717 ui = repo.ui
2718 tmpl, mapfile = gettemplate(ui, tmpl, None)
2718 tmpl, mapfile = gettemplate(ui, tmpl, None)
2719
2719
2720 try:
2720 try:
2721 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2721 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2722 except SyntaxError, inst:
2722 except SyntaxError, inst:
2723 raise util.Abort(inst.args[0])
2723 raise util.Abort(inst.args[0])
2724
2724
2725 for k, v in repo.ui.configitems('committemplate'):
2725 for k, v in repo.ui.configitems('committemplate'):
2726 if k != 'changeset':
2726 if k != 'changeset':
2727 t.t.cache[k] = v
2727 t.t.cache[k] = v
2728
2728
2729 if not extramsg:
2729 if not extramsg:
2730 extramsg = '' # ensure that extramsg is string
2730 extramsg = '' # ensure that extramsg is string
2731
2731
2732 ui.pushbuffer()
2732 ui.pushbuffer()
2733 t.show(ctx, extramsg=extramsg)
2733 t.show(ctx, extramsg=extramsg)
2734 return ui.popbuffer()
2734 return ui.popbuffer()
2735
2735
2736 def buildcommittext(repo, ctx, subs, extramsg):
2736 def buildcommittext(repo, ctx, subs, extramsg):
2737 edittext = []
2737 edittext = []
2738 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2738 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2739 if ctx.description():
2739 if ctx.description():
2740 edittext.append(ctx.description())
2740 edittext.append(ctx.description())
2741 edittext.append("")
2741 edittext.append("")
2742 edittext.append("") # Empty line between message and comments.
2742 edittext.append("") # Empty line between message and comments.
2743 edittext.append(_("HG: Enter commit message."
2743 edittext.append(_("HG: Enter commit message."
2744 " Lines beginning with 'HG:' are removed."))
2744 " Lines beginning with 'HG:' are removed."))
2745 edittext.append("HG: %s" % extramsg)
2745 edittext.append("HG: %s" % extramsg)
2746 edittext.append("HG: --")
2746 edittext.append("HG: --")
2747 edittext.append(_("HG: user: %s") % ctx.user())
2747 edittext.append(_("HG: user: %s") % ctx.user())
2748 if ctx.p2():
2748 if ctx.p2():
2749 edittext.append(_("HG: branch merge"))
2749 edittext.append(_("HG: branch merge"))
2750 if ctx.branch():
2750 if ctx.branch():
2751 edittext.append(_("HG: branch '%s'") % ctx.branch())
2751 edittext.append(_("HG: branch '%s'") % ctx.branch())
2752 if bookmarks.isactivewdirparent(repo):
2752 if bookmarks.isactivewdirparent(repo):
2753 edittext.append(_("HG: bookmark '%s'") % repo._activebookmark)
2753 edittext.append(_("HG: bookmark '%s'") % repo._activebookmark)
2754 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2754 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2755 edittext.extend([_("HG: added %s") % f for f in added])
2755 edittext.extend([_("HG: added %s") % f for f in added])
2756 edittext.extend([_("HG: changed %s") % f for f in modified])
2756 edittext.extend([_("HG: changed %s") % f for f in modified])
2757 edittext.extend([_("HG: removed %s") % f for f in removed])
2757 edittext.extend([_("HG: removed %s") % f for f in removed])
2758 if not added and not modified and not removed:
2758 if not added and not modified and not removed:
2759 edittext.append(_("HG: no files changed"))
2759 edittext.append(_("HG: no files changed"))
2760 edittext.append("")
2760 edittext.append("")
2761
2761
2762 return "\n".join(edittext)
2762 return "\n".join(edittext)
2763
2763
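Both editor helpers rely on the convention that lines starting with 'HG:' are comments and are stripped from the edited message; commitforceeditor does this with a single re.sub. A tiny standalone illustration of that step (not Mercurial code):

    import re

    def striphgcomments(text):
        # Drop every line that begins with 'HG:', including its newline,
        # exactly as the re.sub in commitforceeditor does.
        return re.sub("(?m)^HG:.*(\n|$)", "", text)

    edited = "fix a bug\n\nHG: Enter commit message.\nHG: user: alice\n"
    assert striphgcomments(edited) == "fix a bug\n\n"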
2764 def commitstatus(repo, node, branch, bheads=None, opts={}):
2764 def commitstatus(repo, node, branch, bheads=None, opts={}):
2765 ctx = repo[node]
2765 ctx = repo[node]
2766 parents = ctx.parents()
2766 parents = ctx.parents()
2767
2767
2768 if (not opts.get('amend') and bheads and node not in bheads and not
2768 if (not opts.get('amend') and bheads and node not in bheads and not
2769 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2769 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2770 repo.ui.status(_('created new head\n'))
2770 repo.ui.status(_('created new head\n'))
2771 # The message is not printed for initial roots. For the other
2771 # The message is not printed for initial roots. For the other
2772 # changesets, it is printed in the following situations:
2772 # changesets, it is printed in the following situations:
2773 #
2773 #
2774 # Par column: for the 2 parents with ...
2774 # Par column: for the 2 parents with ...
2775 # N: null or no parent
2775 # N: null or no parent
2776 # B: parent is on another named branch
2776 # B: parent is on another named branch
2777 # C: parent is a regular non head changeset
2777 # C: parent is a regular non head changeset
2778 # H: parent was a branch head of the current branch
2778 # H: parent was a branch head of the current branch
2779 # Msg column: whether we print "created new head" message
2779 # Msg column: whether we print "created new head" message
2780 # In the following, it is assumed that there already exists some
2780 # In the following, it is assumed that there already exists some
2781 # initial branch heads of the current branch, otherwise nothing is
2781 # initial branch heads of the current branch, otherwise nothing is
2782 # printed anyway.
2782 # printed anyway.
2783 #
2783 #
2784 # Par Msg Comment
2784 # Par Msg Comment
2785 # N N y additional topo root
2785 # N N y additional topo root
2786 #
2786 #
2787 # B N y additional branch root
2787 # B N y additional branch root
2788 # C N y additional topo head
2788 # C N y additional topo head
2789 # H N n usual case
2789 # H N n usual case
2790 #
2790 #
2791 # B B y weird additional branch root
2791 # B B y weird additional branch root
2792 # C B y branch merge
2792 # C B y branch merge
2793 # H B n merge with named branch
2793 # H B n merge with named branch
2794 #
2794 #
2795 # C C y additional head from merge
2795 # C C y additional head from merge
2796 # C H n merge with a head
2796 # C H n merge with a head
2797 #
2797 #
2798 # H H n head merge: head count decreases
2798 # H H n head merge: head count decreases
2799
2799
2800 if not opts.get('close_branch'):
2800 if not opts.get('close_branch'):
2801 for r in parents:
2801 for r in parents:
2802 if r.closesbranch() and r.branch() == branch:
2802 if r.closesbranch() and r.branch() == branch:
2803 repo.ui.status(_('reopening closed branch head %d\n') % r)
2803 repo.ui.status(_('reopening closed branch head %d\n') % r)
2804
2804
2805 if repo.ui.debugflag:
2805 if repo.ui.debugflag:
2806 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2806 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2807 elif repo.ui.verbose:
2807 elif repo.ui.verbose:
2808 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2808 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2809
2809
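The condition guarding the 'created new head' message in commitstatus can be read as a predicate over the new node, the recorded branch heads, and the commit's parents. The following is a hedged paraphrase using plain values instead of changectx objects; the function name and argument shapes are illustrative only.

    def createsnewhead(node, branch, bheads, parents, amend=False):
        # parents: list of (node, branch) pairs for the commit's parents.
        # A new head was created if the commit is not itself already a
        # recorded branch head and none of its parents was a head of the
        # same named branch.
        if amend or not bheads or node in bheads:
            return False
        return not [p for p, pbranch in parents
                    if p in bheads and pbranch == branch]

    heads = set(['a', 'b'])
    assert createsnewhead('c', 'default', heads, [('x', 'default')])
    assert not createsnewhead('c', 'default', heads, [('a', 'default')])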
2810 def revert(ui, repo, ctx, parents, *pats, **opts):
2810 def revert(ui, repo, ctx, parents, *pats, **opts):
2811 parent, p2 = parents
2811 parent, p2 = parents
2812 node = ctx.node()
2812 node = ctx.node()
2813
2813
2814 mf = ctx.manifest()
2814 mf = ctx.manifest()
2815 if node == p2:
2815 if node == p2:
2816 parent = p2
2816 parent = p2
2817 if node == parent:
2817 if node == parent:
2818 pmf = mf
2818 pmf = mf
2819 else:
2819 else:
2820 pmf = None
2820 pmf = None
2821
2821
2822 # need all matching names in dirstate and manifest of target rev,
2822 # need all matching names in dirstate and manifest of target rev,
2823 # so have to walk both. do not print errors if files exist in one
2823 # so have to walk both. do not print errors if files exist in one
2824 # but not the other. in both cases, filesets should be evaluated against
2824 # but not the other. in both cases, filesets should be evaluated against
2825 # workingctx to get consistent result (issue4497). this means 'set:**'
2825 # workingctx to get consistent result (issue4497). this means 'set:**'
2826 # cannot be used to select missing files from target rev.
2826 # cannot be used to select missing files from target rev.
2827
2827
2828 # `names` is a mapping for all elements in working copy and target revision
2828 # `names` is a mapping for all elements in working copy and target revision
2829 # The mapping is in the form:
2829 # The mapping is in the form:
2830 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2830 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2831 names = {}
2831 names = {}
2832
2832
2833 wlock = repo.wlock()
2833 wlock = repo.wlock()
2834 try:
2834 try:
2835 ## filling of the `names` mapping
2835 ## filling of the `names` mapping
2836 # walk dirstate to fill `names`
2836 # walk dirstate to fill `names`
2837
2837
2838 interactive = opts.get('interactive', False)
2838 interactive = opts.get('interactive', False)
2839 wctx = repo[None]
2839 wctx = repo[None]
2840 m = scmutil.match(wctx, pats, opts)
2840 m = scmutil.match(wctx, pats, opts)
2841
2841
2842 # we'll need this later
2842 # we'll need this later
2843 targetsubs = sorted(s for s in wctx.substate if m(s))
2843 targetsubs = sorted(s for s in wctx.substate if m(s))
2844
2844
2845 if not m.always():
2845 if not m.always():
2846 m.bad = lambda x, y: False
2846 m.bad = lambda x, y: False
2847 for abs in repo.walk(m):
2847 for abs in repo.walk(m):
2848 names[abs] = m.rel(abs), m.exact(abs)
2848 names[abs] = m.rel(abs), m.exact(abs)
2849
2849
2850 # walk target manifest to fill `names`
2850 # walk target manifest to fill `names`
2851
2851
2852 def badfn(path, msg):
2852 def badfn(path, msg):
2853 if path in names:
2853 if path in names:
2854 return
2854 return
2855 if path in ctx.substate:
2855 if path in ctx.substate:
2856 return
2856 return
2857 path_ = path + '/'
2857 path_ = path + '/'
2858 for f in names:
2858 for f in names:
2859 if f.startswith(path_):
2859 if f.startswith(path_):
2860 return
2860 return
2861 ui.warn("%s: %s\n" % (m.rel(path), msg))
2861 ui.warn("%s: %s\n" % (m.rel(path), msg))
2862
2862
2863 m.bad = badfn
2863 m.bad = badfn
2864 for abs in ctx.walk(m):
2864 for abs in ctx.walk(m):
2865 if abs not in names:
2865 if abs not in names:
2866 names[abs] = m.rel(abs), m.exact(abs)
2866 names[abs] = m.rel(abs), m.exact(abs)
2867
2867
2868 # Find the status of all files in `names`.
2868 # Find the status of all files in `names`.
2869 m = scmutil.matchfiles(repo, names)
2869 m = scmutil.matchfiles(repo, names)
2870
2870
2871 changes = repo.status(node1=node, match=m,
2871 changes = repo.status(node1=node, match=m,
2872 unknown=True, ignored=True, clean=True)
2872 unknown=True, ignored=True, clean=True)
2873 else:
2873 else:
2874 changes = repo.status(node1=node, match=m)
2874 changes = repo.status(node1=node, match=m)
2875 for kind in changes:
2875 for kind in changes:
2876 for abs in kind:
2876 for abs in kind:
2877 names[abs] = m.rel(abs), m.exact(abs)
2877 names[abs] = m.rel(abs), m.exact(abs)
2878
2878
2879 m = scmutil.matchfiles(repo, names)
2879 m = scmutil.matchfiles(repo, names)
2880
2880
2881 modified = set(changes.modified)
2881 modified = set(changes.modified)
2882 added = set(changes.added)
2882 added = set(changes.added)
2883 removed = set(changes.removed)
2883 removed = set(changes.removed)
2884 _deleted = set(changes.deleted)
2884 _deleted = set(changes.deleted)
2885 unknown = set(changes.unknown)
2885 unknown = set(changes.unknown)
2886 unknown.update(changes.ignored)
2886 unknown.update(changes.ignored)
2887 clean = set(changes.clean)
2887 clean = set(changes.clean)
2888 modadded = set()
2888 modadded = set()
2889
2889
2890 # split between files known in target manifest and the others
2890 # split between files known in target manifest and the others
2891 smf = set(mf)
2891 smf = set(mf)
2892
2892
2893 # determine the exact nature of the deleted files
2893 # determine the exact nature of the deleted files
2894 deladded = _deleted - smf
2894 deladded = _deleted - smf
2895 deleted = _deleted - deladded
2895 deleted = _deleted - deladded
2896
2896
2897 # We need to account for the state of the file in the dirstate,
2897 # We need to account for the state of the file in the dirstate,
2898 # even when we revert against something else than parent. This will
2898 # even when we revert against something else than parent. This will
2899 # slightly alter the behavior of revert (doing back up or not, delete
2899 # slightly alter the behavior of revert (doing back up or not, delete
2900 # or just forget etc).
2900 # or just forget etc).
2901 if parent == node:
2901 if parent == node:
2902 dsmodified = modified
2902 dsmodified = modified
2903 dsadded = added
2903 dsadded = added
2904 dsremoved = removed
2904 dsremoved = removed
2905 # store all local modifications, useful later for rename detection
2905 # store all local modifications, useful later for rename detection
2906 localchanges = dsmodified | dsadded
2906 localchanges = dsmodified | dsadded
2907 modified, added, removed = set(), set(), set()
2907 modified, added, removed = set(), set(), set()
2908 else:
2908 else:
2909 changes = repo.status(node1=parent, match=m)
2909 changes = repo.status(node1=parent, match=m)
2910 dsmodified = set(changes.modified)
2910 dsmodified = set(changes.modified)
2911 dsadded = set(changes.added)
2911 dsadded = set(changes.added)
2912 dsremoved = set(changes.removed)
2912 dsremoved = set(changes.removed)
2913 # store all local modifications, useful later for rename detection
2913 # store all local modifications, useful later for rename detection
2914 localchanges = dsmodified | dsadded
2914 localchanges = dsmodified | dsadded
2915
2915
2916 # only take removes between wc and target into account
2916 # only take removes between wc and target into account
2917 clean |= dsremoved - removed
2917 clean |= dsremoved - removed
2918 dsremoved &= removed
2918 dsremoved &= removed
2919 # distinguish between dirstate removes and the others
2919 # distinguish between dirstate removes and the others
2920 removed -= dsremoved
2920 removed -= dsremoved
2921
2921
2922 modadded = added & dsmodified
2922 modadded = added & dsmodified
2923 added -= modadded
2923 added -= modadded
2924
2924
2925 # tell newly modified files apart.
2925 # tell newly modified files apart.
2926 dsmodified &= modified
2926 dsmodified &= modified
2927 dsmodified |= modified & dsadded # dirstate added may need backup
2927 dsmodified |= modified & dsadded # dirstate added may need backup
2928 modified -= dsmodified
2928 modified -= dsmodified
2929
2929
2930 # We need to wait for some post-processing to update this set
2930 # We need to wait for some post-processing to update this set
2931 # before making the distinction. The dirstate will be used for
2931 # before making the distinction. The dirstate will be used for
2932 # that purpose.
2932 # that purpose.
2933 dsadded = added
2933 dsadded = added
2934
2934
2935 # in case of merge, files that are actually added can be reported as
2935 # in case of merge, files that are actually added can be reported as
2936 # modified; we need to post-process the result
2936 # modified; we need to post-process the result
2937 if p2 != nullid:
2937 if p2 != nullid:
2938 if pmf is None:
2938 if pmf is None:
2939 # only need parent manifest in the merge case,
2939 # only need parent manifest in the merge case,
2940 # so do not read by default
2940 # so do not read by default
2941 pmf = repo[parent].manifest()
2941 pmf = repo[parent].manifest()
2942 mergeadd = dsmodified - set(pmf)
2942 mergeadd = dsmodified - set(pmf)
2943 dsadded |= mergeadd
2943 dsadded |= mergeadd
2944 dsmodified -= mergeadd
2944 dsmodified -= mergeadd
2945
2945
2946 # if f is a rename, update `names` to also revert the source
2946 # if f is a rename, update `names` to also revert the source
2947 cwd = repo.getcwd()
2947 cwd = repo.getcwd()
2948 for f in localchanges:
2948 for f in localchanges:
2949 src = repo.dirstate.copied(f)
2949 src = repo.dirstate.copied(f)
2950 # XXX should we check for rename down to target node?
2950 # XXX should we check for rename down to target node?
2951 if src and src not in names and repo.dirstate[src] == 'r':
2951 if src and src not in names and repo.dirstate[src] == 'r':
2952 dsremoved.add(src)
2952 dsremoved.add(src)
2953 names[src] = (repo.pathto(src, cwd), True)
2953 names[src] = (repo.pathto(src, cwd), True)
2954
2954
2955 # distinguish between files to forget and the others
2955 # distinguish between files to forget and the others
2956 added = set()
2956 added = set()
2957 for abs in dsadded:
2957 for abs in dsadded:
2958 if repo.dirstate[abs] != 'a':
2958 if repo.dirstate[abs] != 'a':
2959 added.add(abs)
2959 added.add(abs)
2960 dsadded -= added
2960 dsadded -= added
2961
2961
2962 for abs in deladded:
2962 for abs in deladded:
2963 if repo.dirstate[abs] == 'a':
2963 if repo.dirstate[abs] == 'a':
2964 dsadded.add(abs)
2964 dsadded.add(abs)
2965 deladded -= dsadded
2965 deladded -= dsadded
2966
2966
2967 # For files marked as removed, we check if an unknown file is present at
2967 # For files marked as removed, we check if an unknown file is present at
2968 # the same path. If such a file exists, it may need to be backed up.
2968 # the same path. If such a file exists, it may need to be backed up.
2969 # Making the distinction at this stage keeps the backup
2969 # Making the distinction at this stage keeps the backup
2970 # logic simpler.
2970 # logic simpler.
2971 removunk = set()
2971 removunk = set()
2972 for abs in removed:
2972 for abs in removed:
2973 target = repo.wjoin(abs)
2973 target = repo.wjoin(abs)
2974 if os.path.lexists(target):
2974 if os.path.lexists(target):
2975 removunk.add(abs)
2975 removunk.add(abs)
2976 removed -= removunk
2976 removed -= removunk
2977
2977
2978 dsremovunk = set()
2978 dsremovunk = set()
2979 for abs in dsremoved:
2979 for abs in dsremoved:
2980 target = repo.wjoin(abs)
2980 target = repo.wjoin(abs)
2981 if os.path.lexists(target):
2981 if os.path.lexists(target):
2982 dsremovunk.add(abs)
2982 dsremovunk.add(abs)
2983 dsremoved -= dsremovunk
2983 dsremoved -= dsremovunk
2984
2984
2985 # actions to be actually performed by revert
2985 # actions to be actually performed by revert
2986 # (<list of files>, <message>) tuple
2986 # (<list of files>, <message>) tuple
2987 actions = {'revert': ([], _('reverting %s\n')),
2987 actions = {'revert': ([], _('reverting %s\n')),
2988 'add': ([], _('adding %s\n')),
2988 'add': ([], _('adding %s\n')),
2989 'remove': ([], _('removing %s\n')),
2989 'remove': ([], _('removing %s\n')),
2990 'drop': ([], _('removing %s\n')),
2990 'drop': ([], _('removing %s\n')),
2991 'forget': ([], _('forgetting %s\n')),
2991 'forget': ([], _('forgetting %s\n')),
2992 'undelete': ([], _('undeleting %s\n')),
2992 'undelete': ([], _('undeleting %s\n')),
2993 'noop': (None, _('no changes needed to %s\n')),
2993 'noop': (None, _('no changes needed to %s\n')),
2994 'unknown': (None, _('file not managed: %s\n')),
2994 'unknown': (None, _('file not managed: %s\n')),
2995 }
2995 }
2996
2996
2997 # "constants" that convey the backup strategy.
2997 # "constants" that convey the backup strategy.
2998 # All are set to `discard` if `no-backup` is set, to avoid checking
2998 # All are set to `discard` if `no-backup` is set, to avoid checking
2999 # no_backup lower in the code.
2999 # no_backup lower in the code.
3000 # These values are ordered for comparison purposes
3000 # These values are ordered for comparison purposes
3001 backup = 2 # unconditionally do backup
3001 backup = 2 # unconditionally do backup
3002 check = 1 # check if the existing file differs from target
3002 check = 1 # check if the existing file differs from target
3003 discard = 0 # never do backup
3003 discard = 0 # never do backup
3004 if opts.get('no_backup'):
3004 if opts.get('no_backup'):
3005 backup = check = discard
3005 backup = check = discard
3006
3006
3007 backupanddel = actions['remove']
3007 backupanddel = actions['remove']
3008 if not opts.get('no_backup'):
3008 if not opts.get('no_backup'):
3009 backupanddel = actions['drop']
3009 backupanddel = actions['drop']
3010
3010
3011 disptable = (
3011 disptable = (
3012 # dispatch table:
3012 # dispatch table:
3013 # file state
3013 # file state
3014 # action
3014 # action
3015 # make backup
3015 # make backup
3016
3016
3017 ## Sets that result in changes to files on disk
3017 ## Sets that result in changes to files on disk
3018 # Modified compared to target, no local change
3018 # Modified compared to target, no local change
3019 (modified, actions['revert'], discard),
3019 (modified, actions['revert'], discard),
3020 # Modified compared to target, but local file is deleted
3020 # Modified compared to target, but local file is deleted
3021 (deleted, actions['revert'], discard),
3021 (deleted, actions['revert'], discard),
3022 # Modified compared to target, local change
3022 # Modified compared to target, local change
3023 (dsmodified, actions['revert'], backup),
3023 (dsmodified, actions['revert'], backup),
3024 # Added since target
3024 # Added since target
3025 (added, actions['remove'], discard),
3025 (added, actions['remove'], discard),
3026 # Added in working directory
3026 # Added in working directory
3027 (dsadded, actions['forget'], discard),
3027 (dsadded, actions['forget'], discard),
3028 # Added since target, have local modification
3028 # Added since target, have local modification
3029 (modadded, backupanddel, backup),
3029 (modadded, backupanddel, backup),
3030 # Added since target but file is missing in working directory
3030 # Added since target but file is missing in working directory
3031 (deladded, actions['drop'], discard),
3031 (deladded, actions['drop'], discard),
3032 # Removed since target, before working copy parent
3032 # Removed since target, before working copy parent
3033 (removed, actions['add'], discard),
3033 (removed, actions['add'], discard),
3034 # Same as `removed` but an unknown file exists at the same path
3034 # Same as `removed` but an unknown file exists at the same path
3035 (removunk, actions['add'], check),
3035 (removunk, actions['add'], check),
3036 # Removed since target, marked as such in working copy parent
3036 # Removed since target, marked as such in working copy parent
3037 (dsremoved, actions['undelete'], discard),
3037 (dsremoved, actions['undelete'], discard),
3038 # Same as `dsremoved` but an unknown file exists at the same path
3038 # Same as `dsremoved` but an unknown file exists at the same path
3039 (dsremovunk, actions['undelete'], check),
3039 (dsremovunk, actions['undelete'], check),
3040 ## the following sets do not result in any file changes
3040 ## the following sets do not result in any file changes
3041 # File with no modification
3041 # File with no modification
3042 (clean, actions['noop'], discard),
3042 (clean, actions['noop'], discard),
3043 # Existing file, not tracked anywhere
3043 # Existing file, not tracked anywhere
3044 (unknown, actions['unknown'], discard),
3044 (unknown, actions['unknown'], discard),
3045 )
3045 )
3046
3046
3047 for abs, (rel, exact) in sorted(names.items()):
3047 for abs, (rel, exact) in sorted(names.items()):
3048 # target file to be touched on disk (relative to cwd)
3048 # target file to be touched on disk (relative to cwd)
3049 target = repo.wjoin(abs)
3049 target = repo.wjoin(abs)
3050 # search the entry in the dispatch table.
3050 # search the entry in the dispatch table.
3051 # if the file is in any of these sets, it was touched in the working
3051 # if the file is in any of these sets, it was touched in the working
3052 # directory parent and we are sure it needs to be reverted.
3052 # directory parent and we are sure it needs to be reverted.
3053 for table, (xlist, msg), dobackup in disptable:
3053 for table, (xlist, msg), dobackup in disptable:
3054 if abs not in table:
3054 if abs not in table:
3055 continue
3055 continue
3056 if xlist is not None:
3056 if xlist is not None:
3057 xlist.append(abs)
3057 xlist.append(abs)
3058 if dobackup and (backup <= dobackup
3058 if dobackup and (backup <= dobackup
3059 or wctx[abs].cmp(ctx[abs])):
3059 or wctx[abs].cmp(ctx[abs])):
3060 bakname = "%s.orig" % rel
3060 bakname = "%s.orig" % rel
3061 ui.note(_('saving current version of %s as %s\n') %
3061 ui.note(_('saving current version of %s as %s\n') %
3062 (rel, bakname))
3062 (rel, bakname))
3063 if not opts.get('dry_run'):
3063 if not opts.get('dry_run'):
3064 if interactive:
3064 if interactive:
3065 util.copyfile(target, bakname)
3065 util.copyfile(target, bakname)
3066 else:
3066 else:
3067 util.rename(target, bakname)
3067 util.rename(target, bakname)
3068 if ui.verbose or not exact:
3068 if ui.verbose or not exact:
3069 if not isinstance(msg, basestring):
3069 if not isinstance(msg, basestring):
3070 msg = msg(abs)
3070 msg = msg(abs)
3071 ui.status(msg % rel)
3071 ui.status(msg % rel)
3072 elif exact:
3072 elif exact:
3073 ui.warn(msg % rel)
3073 ui.warn(msg % rel)
3074 break
3074 break
3075
3075
3076 if not opts.get('dry_run'):
3076 if not opts.get('dry_run'):
3077 needdata = ('revert', 'add', 'undelete')
3077 needdata = ('revert', 'add', 'undelete')
3078 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3078 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3079 _performrevert(repo, parents, ctx, actions, interactive)
3079 _performrevert(repo, parents, ctx, actions, interactive)
3080
3080
3081 if targetsubs:
3081 if targetsubs:
3082 # Revert the subrepos on the revert list
3082 # Revert the subrepos on the revert list
3083 for sub in targetsubs:
3083 for sub in targetsubs:
3084 try:
3084 try:
3085 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3085 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3086 except KeyError:
3086 except KeyError:
3087 raise util.Abort("subrepository '%s' does not exist in %s!"
3087 raise util.Abort("subrepository '%s' does not exist in %s!"
3088 % (sub, short(ctx.node())))
3088 % (sub, short(ctx.node())))
3089 finally:
3089 finally:
3090 wlock.release()
3090 wlock.release()
3091
3091
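The heart of revert above is the dispatch table: each file is looked up in a sequence of (file set, action, backup level) entries, with the backup levels ordered discard < check < backup. Below is a miniature of that pattern, using plain path strings and a differs() callback standing in for wctx[abs].cmp(ctx[abs]); everything here is illustrative rather than Mercurial's actual data structures.

    DISCARD, CHECK, BACKUP = 0, 1, 2

    def plan(modified, dsmodified, added, clean, differs):
        # First matching set wins, exactly as in the loop over disptable.
        actions = {'revert': [], 'remove': [], 'noop': []}
        table = [
            (modified,   actions['revert'], DISCARD),
            (dsmodified, actions['revert'], BACKUP),
            (added,      actions['remove'], DISCARD),
            (clean,      actions['noop'],   DISCARD),
        ]
        backups = []
        for path in sorted(modified | dsmodified | added | clean):
            for fileset, target, dobackup in table:
                if path not in fileset:
                    continue
                target.append(path)
                # back up when the level demands it unconditionally, or when
                # a check-level entry finds the file really differs
                if dobackup == BACKUP or (dobackup == CHECK and differs(path)):
                    backups.append(path + '.orig')
                break
        return actions, backups

    acts, baks = plan({'a'}, {'b'}, {'c'}, {'d'}, differs=lambda p: False)
    assert acts['revert'] == ['a', 'b'] and baks == ['b.orig']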
3092 def _revertprefetch(repo, ctx, *files):
3092 def _revertprefetch(repo, ctx, *files):
3093 """Let extensions changing the storage layer prefetch content"""
3093 """Let extensions changing the storage layer prefetch content"""
3094 pass
3094 pass
3095
3095
3096 def _performrevert(repo, parents, ctx, actions, interactive=False):
3096 def _performrevert(repo, parents, ctx, actions, interactive=False):
3097 """function that actually performs all the actions computed for revert
3097 """function that actually performs all the actions computed for revert
3098
3098
3099 This is an independent function to let extensions plug in and react to
3099 This is an independent function to let extensions plug in and react to
3100 the imminent revert.
3100 the imminent revert.
3101
3101
3102 Make sure you have the working directory locked when calling this function.
3102 Make sure you have the working directory locked when calling this function.
3103 """
3103 """
3104 parent, p2 = parents
3104 parent, p2 = parents
3105 node = ctx.node()
3105 node = ctx.node()
3106 def checkout(f):
3106 def checkout(f):
3107 fc = ctx[f]
3107 fc = ctx[f]
3108 return repo.wwrite(f, fc.data(), fc.flags())
3108 return repo.wwrite(f, fc.data(), fc.flags())
3109
3109
3110 audit_path = pathutil.pathauditor(repo.root)
3110 audit_path = pathutil.pathauditor(repo.root)
3111 for f in actions['forget'][0]:
3111 for f in actions['forget'][0]:
3112 repo.dirstate.drop(f)
3112 repo.dirstate.drop(f)
3113 for f in actions['remove'][0]:
3113 for f in actions['remove'][0]:
3114 audit_path(f)
3114 audit_path(f)
3115 try:
3115 try:
3116 util.unlinkpath(repo.wjoin(f))
3116 util.unlinkpath(repo.wjoin(f))
3117 except OSError:
3117 except OSError:
3118 pass
3118 pass
3119 repo.dirstate.remove(f)
3119 repo.dirstate.remove(f)
3120 for f in actions['drop'][0]:
3120 for f in actions['drop'][0]:
3121 audit_path(f)
3121 audit_path(f)
3122 repo.dirstate.remove(f)
3122 repo.dirstate.remove(f)
3123
3123
3124 normal = None
3124 normal = None
3125 if node == parent:
3125 if node == parent:
3126 # We're reverting to our parent. If possible, we'd like status
3126 # We're reverting to our parent. If possible, we'd like status
3127 # to report the file as clean. We have to use normallookup for
3127 # to report the file as clean. We have to use normallookup for
3128 # merges to avoid losing information about merged/dirty files.
3128 # merges to avoid losing information about merged/dirty files.
3129 if p2 != nullid:
3129 if p2 != nullid:
3130 normal = repo.dirstate.normallookup
3130 normal = repo.dirstate.normallookup
3131 else:
3131 else:
3132 normal = repo.dirstate.normal
3132 normal = repo.dirstate.normal
3133
3133
3134 newlyaddedandmodifiedfiles = set()
3134 newlyaddedandmodifiedfiles = set()
3135 if interactive:
3135 if interactive:
3136 # Prompt the user for changes to revert
3136 # Prompt the user for changes to revert
3137 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3137 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3138 m = scmutil.match(ctx, torevert, {})
3138 m = scmutil.match(ctx, torevert, {})
3139 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3139 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3140 diffopts.nodates = True
3140 diffopts.nodates = True
3141 diffopts.git = True
3141 diffopts.git = True
3142 reversehunks = repo.ui.configbool('experimental',
3143 'revertalternateinteractivemode',
3144 False)
3145 if reversehunks:
3146 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3147 else:
3142 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3148 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3143 originalchunks = patch.parsepatch(diff)
3149 originalchunks = patch.parsepatch(diff)
3150
3144 try:
3151 try:
3152
3145 chunks = recordfilter(repo.ui, originalchunks)
3153 chunks = recordfilter(repo.ui, originalchunks)
3154 if reversehunks:
3155 chunks = patch.reversehunks(chunks)
3156
3146 except patch.PatchError, err:
3157 except patch.PatchError, err:
3147 raise util.Abort(_('error parsing patch: %s') % err)
3158 raise util.Abort(_('error parsing patch: %s') % err)
3148
3159
3149 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3160 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3150 # Apply changes
3161 # Apply changes
3151 fp = cStringIO.StringIO()
3162 fp = cStringIO.StringIO()
3152 for c in chunks:
3163 for c in chunks:
3153 c.write(fp)
3164 c.write(fp)
3154 dopatch = fp.tell()
3165 dopatch = fp.tell()
3155 fp.seek(0)
3166 fp.seek(0)
3156 if dopatch:
3167 if dopatch:
3157 try:
3168 try:
3158 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3169 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3159 except patch.PatchError, err:
3170 except patch.PatchError, err:
3160 raise util.Abort(str(err))
3171 raise util.Abort(str(err))
3161 del fp
3172 del fp
3162 else:
3173 else:
3163 for f in actions['revert'][0]:
3174 for f in actions['revert'][0]:
3164 wsize = checkout(f)
3175 wsize = checkout(f)
3165 if normal:
3176 if normal:
3166 normal(f)
3177 normal(f)
3167 elif wsize == repo.dirstate._map[f][2]:
3178 elif wsize == repo.dirstate._map[f][2]:
3168 # changes may be overlooked without normallookup,
3179 # changes may be overlooked without normallookup,
3169 # if size isn't changed at reverting
3180 # if size isn't changed at reverting
3170 repo.dirstate.normallookup(f)
3181 repo.dirstate.normallookup(f)
3171
3182
3172 for f in actions['add'][0]:
3183 for f in actions['add'][0]:
3173 # Don't checkout modified files, they are already created by the diff
3184 # Don't checkout modified files, they are already created by the diff
3174 if f not in newlyaddedandmodifiedfiles:
3185 if f not in newlyaddedandmodifiedfiles:
3175 checkout(f)
3186 checkout(f)
3176 repo.dirstate.add(f)
3187 repo.dirstate.add(f)
3177
3188
3178 normal = repo.dirstate.normallookup
3189 normal = repo.dirstate.normallookup
3179 if node == parent and p2 == nullid:
3190 if node == parent and p2 == nullid:
3180 normal = repo.dirstate.normal
3191 normal = repo.dirstate.normal
3181 for f in actions['undelete'][0]:
3192 for f in actions['undelete'][0]:
3182 checkout(f)
3193 checkout(f)
3183 normal(f)
3194 normal(f)
3184
3195
3185 copied = copies.pathcopies(repo[parent], ctx)
3196 copied = copies.pathcopies(repo[parent], ctx)
3186
3197
3187 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3198 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3188 if f in copied:
3199 if f in copied:
3189 repo.dirstate.copy(copied[f], f)
3200 repo.dirstate.copy(copied[f], f)
3190
3201
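The experimental experimental.revertalternateinteractivemode switch used above changes interactive revert to diff in the opposite direction (from the target revision to the working copy) and then invert the hunks the user selected, via patch.reversehunks, before applying them. Purely to illustrate what inverting a hunk means, here is a standalone sketch over a trivially parsed unified-diff hunk; it is not Mercurial's parser or its hunk objects.

    def reversehunk(header, lines):
        # header is (oldstart, oldlen, newstart, newlen); lines are raw
        # hunk body lines. Reversing swaps the old/new ranges and flips
        # '+' and '-' markers, so applying the result undoes the original.
        oldstart, oldlen, newstart, newlen = header
        flipped = []
        for line in lines:
            if line.startswith('+'):
                flipped.append('-' + line[1:])
            elif line.startswith('-'):
                flipped.append('+' + line[1:])
            else:
                flipped.append(line)  # context lines are unchanged
        return (newstart, newlen, oldstart, oldlen), flipped

    hdr, body = reversehunk((1, 2, 1, 3), [' keep', '-old', '+new', '+added'])
    assert hdr == (1, 3, 1, 2)
    assert body == [' keep', '+old', '-new', '-added']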
3191 def command(table):
3202 def command(table):
3192 """Returns a function object to be used as a decorator for making commands.
3203 """Returns a function object to be used as a decorator for making commands.
3193
3204
3194 This function receives a command table as its argument. The table should
3205 This function receives a command table as its argument. The table should
3195 be a dict.
3206 be a dict.
3196
3207
3197 The returned function can be used as a decorator for adding commands
3208 The returned function can be used as a decorator for adding commands
3198 to that command table. This function accepts multiple arguments to define
3209 to that command table. This function accepts multiple arguments to define
3199 a command.
3210 a command.
3200
3211
3201 The first argument is the command name.
3212 The first argument is the command name.
3202
3213
3203 The options argument is an iterable of tuples defining command arguments.
3214 The options argument is an iterable of tuples defining command arguments.
3204 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3215 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3205
3216
3206 The synopsis argument defines a short, one line summary of how to use the
3217 The synopsis argument defines a short, one line summary of how to use the
3207 command. This shows up in the help output.
3218 command. This shows up in the help output.
3208
3219
3209 The norepo argument defines whether the command does not require a
3220 The norepo argument defines whether the command does not require a
3210 local repository. Most commands operate against a repository, thus the
3221 local repository. Most commands operate against a repository, thus the
3211 default is False.
3222 default is False.
3212
3223
3213 The optionalrepo argument defines whether the command optionally requires
3224 The optionalrepo argument defines whether the command optionally requires
3214 a local repository.
3225 a local repository.
3215
3226
3216 The inferrepo argument defines whether to try to find a repository from the
3227 The inferrepo argument defines whether to try to find a repository from the
3217 command line arguments. If True, arguments will be examined for potential
3228 command line arguments. If True, arguments will be examined for potential
3218 repository locations. See ``findrepo()``. If a repository is found, it
3229 repository locations. See ``findrepo()``. If a repository is found, it
3219 will be used.
3230 will be used.
3220 """
3231 """
3221 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3232 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3222 inferrepo=False):
3233 inferrepo=False):
3223 def decorator(func):
3234 def decorator(func):
3224 if synopsis:
3235 if synopsis:
3225 table[name] = func, list(options), synopsis
3236 table[name] = func, list(options), synopsis
3226 else:
3237 else:
3227 table[name] = func, list(options)
3238 table[name] = func, list(options)
3228
3239
3229 if norepo:
3240 if norepo:
3230 # Avoid import cycle.
3241 # Avoid import cycle.
3231 import commands
3242 import commands
3232 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3243 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3233
3244
3234 if optionalrepo:
3245 if optionalrepo:
3235 import commands
3246 import commands
3236 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3247 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3237
3248
3238 if inferrepo:
3249 if inferrepo:
3239 import commands
3250 import commands
3240 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3251 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3241
3252
3242 return func
3253 return func
3243 return decorator
3254 return decorator
3244
3255
3245 return cmd
3256 return cmd
3246
3257
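command() above is a factory: it returns a decorator that records each command in the given table under its name, together with its option list and optional synopsis. Below is a minimal standalone version of that registration pattern, leaving out the norepo/optionalrepo/inferrepo bookkeeping; the sample command and its options are invented for the example.

    def makecommand(table):
        def cmd(name, options=(), synopsis=None):
            def decorator(func):
                # store (func, options[, synopsis]) under the command name,
                # mirroring the table layout used above
                if synopsis:
                    table[name] = func, list(options), synopsis
                else:
                    table[name] = func, list(options)
                return func
            return decorator
        return cmd

    commands = {}
    command = makecommand(commands)

    @command('^hello|hi', [('g', 'greeting', 'Hello', 'greeting to use')],
             'hg hello [NAME]')
    def hello(ui=None, name='world', greeting='Hello'):
        return '%s, %s!' % (greeting, name)

    assert commands['^hello|hi'][2] == 'hg hello [NAME]'
    assert commands['^hello|hi'][0](None, 'hg') == 'Hello, hg!'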
3247 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3258 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3248 # commands.outgoing. "missing" is "missing" of the result of
3259 # commands.outgoing. "missing" is "missing" of the result of
3249 # "findcommonoutgoing()"
3260 # "findcommonoutgoing()"
3250 outgoinghooks = util.hooks()
3261 outgoinghooks = util.hooks()
3251
3262
3252 # a list of (ui, repo) functions called by commands.summary
3263 # a list of (ui, repo) functions called by commands.summary
3253 summaryhooks = util.hooks()
3264 summaryhooks = util.hooks()
3254
3265
3255 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3266 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3256 #
3267 #
3257 # functions should return tuple of booleans below, if 'changes' is None:
3268 # functions should return tuple of booleans below, if 'changes' is None:
3258 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3269 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3259 #
3270 #
3260 # otherwise, 'changes' is a tuple of tuples below:
3271 # otherwise, 'changes' is a tuple of tuples below:
3261 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3272 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3262 # - (desturl, destbranch, destpeer, outgoing)
3273 # - (desturl, destbranch, destpeer, outgoing)
3263 summaryremotehooks = util.hooks()
3274 summaryremotehooks = util.hooks()
3264
3275
3265 # A list of state files kept by multistep operations like graft.
3276 # A list of state files kept by multistep operations like graft.
3266 # Since graft cannot be aborted, it is considered 'clearable' by update.
3277 # Since graft cannot be aborted, it is considered 'clearable' by update.
3267 # note: bisect is intentionally excluded
3278 # note: bisect is intentionally excluded
3268 # (state file, clearable, allowcommit, error, hint)
3279 # (state file, clearable, allowcommit, error, hint)
3269 unfinishedstates = [
3280 unfinishedstates = [
3270 ('graftstate', True, False, _('graft in progress'),
3281 ('graftstate', True, False, _('graft in progress'),
3271 _("use 'hg graft --continue' or 'hg update' to abort")),
3282 _("use 'hg graft --continue' or 'hg update' to abort")),
3272 ('updatestate', True, False, _('last update was interrupted'),
3283 ('updatestate', True, False, _('last update was interrupted'),
3273 _("use 'hg update' to get a consistent checkout"))
3284 _("use 'hg update' to get a consistent checkout"))
3274 ]
3285 ]
3275
3286
3276 def checkunfinished(repo, commit=False):
3287 def checkunfinished(repo, commit=False):
3277 '''Look for an unfinished multistep operation, like graft, and abort
3288 '''Look for an unfinished multistep operation, like graft, and abort
3278 if found. It's probably good to check this right before
3289 if found. It's probably good to check this right before
3279 bailifchanged().
3290 bailifchanged().
3280 '''
3291 '''
3281 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3292 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3282 if commit and allowcommit:
3293 if commit and allowcommit:
3283 continue
3294 continue
3284 if repo.vfs.exists(f):
3295 if repo.vfs.exists(f):
3285 raise util.Abort(msg, hint=hint)
3296 raise util.Abort(msg, hint=hint)
3286
3297
3287 def clearunfinished(repo):
3298 def clearunfinished(repo):
3288 '''Check for unfinished operations (as above), and clear the ones
3299 '''Check for unfinished operations (as above), and clear the ones
3289 that are clearable.
3300 that are clearable.
3290 '''
3301 '''
3291 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3302 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3292 if not clearable and repo.vfs.exists(f):
3303 if not clearable and repo.vfs.exists(f):
3293 raise util.Abort(msg, hint=hint)
3304 raise util.Abort(msg, hint=hint)
3294 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3305 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3295 if clearable and repo.vfs.exists(f):
3306 if clearable and repo.vfs.exists(f):
3296 util.unlink(repo.join(f))
3307 util.unlink(repo.join(f))
3297
3308
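checkunfinished and clearunfinished are driven entirely by the presence of well-known state files. A repository-agnostic sketch of the same idea follows; it checks a plain directory with os.path and raises RuntimeError where the real code uses repo.vfs and util.Abort, and the hint strings are omitted.

    import os

    # (state file, clearable, allow commit, error message)
    UNFINISHED = [
        ('graftstate', True, False, 'graft in progress'),
        ('updatestate', True, False, 'last update was interrupted'),
    ]

    def checkunfinished(statedir, commit=False):
        # refuse to start a new operation while another one is half done
        for fname, clearable, allowcommit, msg in UNFINISHED:
            if commit and allowcommit:
                continue
            if os.path.exists(os.path.join(statedir, fname)):
                raise RuntimeError(msg)

    def clearunfinished(statedir):
        # abort on unclearable states first, then remove the clearable ones
        for fname, clearable, allowcommit, msg in UNFINISHED:
            if not clearable and os.path.exists(os.path.join(statedir, fname)):
                raise RuntimeError(msg)
        for fname, clearable, allowcommit, msg in UNFINISHED:
            path = os.path.join(statedir, fname)
            if clearable and os.path.exists(path):
                os.unlink(path)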
3298 class dirstateguard(object):
3309 class dirstateguard(object):
3299 '''Restore dirstate at unexpected failure.
3310 '''Restore dirstate at unexpected failure.
3300
3311
3301 At the construction, this class does:
3312 At the construction, this class does:
3302
3313
3303 - write current ``repo.dirstate`` out, and
3314 - write current ``repo.dirstate`` out, and
3304 - save ``.hg/dirstate`` into the backup file
3315 - save ``.hg/dirstate`` into the backup file
3305
3316
3306 This restores ``.hg/dirstate`` from the backup file, if ``release()``
3317 This restores ``.hg/dirstate`` from the backup file, if ``release()``
3307 is invoked before ``close()``.
3318 is invoked before ``close()``.
3308
3319
3309 This just removes the backup file at ``close()`` before ``release()``.
3320 This just removes the backup file at ``close()`` before ``release()``.
3310 '''
3321 '''
3311
3322
3312 def __init__(self, repo, name):
3323 def __init__(self, repo, name):
3313 repo.dirstate.write()
3324 repo.dirstate.write()
3314 self._repo = repo
3325 self._repo = repo
3315 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3326 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3316 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3327 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3317 self._active = True
3328 self._active = True
3318 self._closed = False
3329 self._closed = False
3319
3330
3320 def __del__(self):
3331 def __del__(self):
3321 if self._active: # still active
3332 if self._active: # still active
3322 # this may occur, even if this class is used correctly:
3333 # this may occur, even if this class is used correctly:
3323 # for example, releasing other resources like transaction
3334 # for example, releasing other resources like transaction
3324 # may raise exception before ``dirstateguard.release`` in
3335 # may raise exception before ``dirstateguard.release`` in
3325 # ``release(tr, ....)``.
3336 # ``release(tr, ....)``.
3326 self._abort()
3337 self._abort()
3327
3338
3328 def close(self):
3339 def close(self):
3329 if not self._active: # already inactivated
3340 if not self._active: # already inactivated
3330 msg = (_("can't close already inactivated backup: %s")
3341 msg = (_("can't close already inactivated backup: %s")
3331 % self._filename)
3342 % self._filename)
3332 raise util.Abort(msg)
3343 raise util.Abort(msg)
3333
3344
3334 self._repo.vfs.unlink(self._filename)
3345 self._repo.vfs.unlink(self._filename)
3335 self._active = False
3346 self._active = False
3336 self._closed = True
3347 self._closed = True
3337
3348
3338 def _abort(self):
3349 def _abort(self):
3339 # this "invalidate()" prevents "wlock.release()" from writing
3350 # this "invalidate()" prevents "wlock.release()" from writing
3340 # changes of dirstate out after restoring to original status
3351 # changes of dirstate out after restoring to original status
3341 self._repo.dirstate.invalidate()
3352 self._repo.dirstate.invalidate()
3342
3353
3343 self._repo.vfs.rename(self._filename, 'dirstate')
3354 self._repo.vfs.rename(self._filename, 'dirstate')
3344 self._active = False
3355 self._active = False
3345
3356
3346 def release(self):
3357 def release(self):
3347 if not self._closed:
3358 if not self._closed:
3348 if not self._active: # already inactivated
3359 if not self._active: # already inactivated
3349 msg = (_("can't release already inactivated backup: %s")
3360 msg = (_("can't release already inactivated backup: %s")
3350 % self._filename)
3361 % self._filename)
3351 raise util.Abort(msg)
3362 raise util.Abort(msg)
3352 self._abort()
3363 self._abort()
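dirstateguard is an instance of a general backup-guard pattern: snapshot a file when the guard is created, delete the snapshot on close(), and restore it on release() if close() was never reached. Here is a standalone sketch of that pattern for an arbitrary file, with no Mercurial objects involved and the class name invented for the example.

    import os, shutil

    class filebackupguard(object):
        '''Restore a file from a snapshot unless close() was called.'''

        def __init__(self, path):
            self._path = path
            self._backup = '%s.backup.%d' % (path, id(self))
            shutil.copyfile(path, self._backup)  # snapshot current contents
            self._active = True

        def close(self):
            # success path: the new contents are wanted, drop the snapshot
            os.unlink(self._backup)
            self._active = False

        def release(self):
            # failure path: put the snapshot back if close() never happened
            if self._active:
                shutil.move(self._backup, self._path)
                self._active = False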
@@ -1,2480 +1,2551 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12 # On python2.4 you have to import these by name or they fail to
12 # On python2.4 you have to import these by name or they fail to
13 # load. This was not a problem on Python 2.7.
13 # load. This was not a problem on Python 2.7.
14 import email.Generator
14 import email.Generator
15 import email.Parser
15 import email.Parser
16
16
17 from i18n import _
17 from i18n import _
18 from node import hex, short
18 from node import hex, short
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
20 import pathutil
20 import pathutil
21
21
22 gitre = re.compile('diff --git a/(.*) b/(.*)')
22 gitre = re.compile('diff --git a/(.*) b/(.*)')
23 tabsplitter = re.compile(r'(\t+|[^\t]+)')
23 tabsplitter = re.compile(r'(\t+|[^\t]+)')
24
24
25 class PatchError(Exception):
25 class PatchError(Exception):
26 pass
26 pass
27
27
28
28
29 # public functions
29 # public functions
30
30
31 def split(stream):
31 def split(stream):
32 '''return an iterator of individual patches from a stream'''
32 '''return an iterator of individual patches from a stream'''
33 def isheader(line, inheader):
33 def isheader(line, inheader):
34 if inheader and line[0] in (' ', '\t'):
34 if inheader and line[0] in (' ', '\t'):
35 # continuation
35 # continuation
36 return True
36 return True
37 if line[0] in (' ', '-', '+'):
37 if line[0] in (' ', '-', '+'):
38 # diff line - don't check for header pattern in there
38 # diff line - don't check for header pattern in there
39 return False
39 return False
40 l = line.split(': ', 1)
40 l = line.split(': ', 1)
41 return len(l) == 2 and ' ' not in l[0]
41 return len(l) == 2 and ' ' not in l[0]
42
42
43 def chunk(lines):
43 def chunk(lines):
44 return cStringIO.StringIO(''.join(lines))
44 return cStringIO.StringIO(''.join(lines))
45
45
46 def hgsplit(stream, cur):
46 def hgsplit(stream, cur):
47 inheader = True
47 inheader = True
48
48
49 for line in stream:
49 for line in stream:
50 if not line.strip():
50 if not line.strip():
51 inheader = False
51 inheader = False
52 if not inheader and line.startswith('# HG changeset patch'):
52 if not inheader and line.startswith('# HG changeset patch'):
53 yield chunk(cur)
53 yield chunk(cur)
54 cur = []
54 cur = []
55 inheader = True
55 inheader = True
56
56
57 cur.append(line)
57 cur.append(line)
58
58
59 if cur:
59 if cur:
60 yield chunk(cur)
60 yield chunk(cur)
61
61
62 def mboxsplit(stream, cur):
62 def mboxsplit(stream, cur):
63 for line in stream:
63 for line in stream:
64 if line.startswith('From '):
64 if line.startswith('From '):
65 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
66 yield c
66 yield c
67 cur = []
67 cur = []
68
68
69 cur.append(line)
69 cur.append(line)
70
70
71 if cur:
71 if cur:
72 for c in split(chunk(cur[1:])):
72 for c in split(chunk(cur[1:])):
73 yield c
73 yield c
74
74
75 def mimesplit(stream, cur):
75 def mimesplit(stream, cur):
76 def msgfp(m):
76 def msgfp(m):
77 fp = cStringIO.StringIO()
77 fp = cStringIO.StringIO()
78 g = email.Generator.Generator(fp, mangle_from_=False)
78 g = email.Generator.Generator(fp, mangle_from_=False)
79 g.flatten(m)
79 g.flatten(m)
80 fp.seek(0)
80 fp.seek(0)
81 return fp
81 return fp
82
82
83 for line in stream:
83 for line in stream:
84 cur.append(line)
84 cur.append(line)
85 c = chunk(cur)
85 c = chunk(cur)
86
86
87 m = email.Parser.Parser().parse(c)
87 m = email.Parser.Parser().parse(c)
88 if not m.is_multipart():
88 if not m.is_multipart():
89 yield msgfp(m)
89 yield msgfp(m)
90 else:
90 else:
91 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
91 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
92 for part in m.walk():
92 for part in m.walk():
93 ct = part.get_content_type()
93 ct = part.get_content_type()
94 if ct not in ok_types:
94 if ct not in ok_types:
95 continue
95 continue
96 yield msgfp(part)
96 yield msgfp(part)
97
97
98 def headersplit(stream, cur):
98 def headersplit(stream, cur):
99 inheader = False
99 inheader = False
100
100
101 for line in stream:
101 for line in stream:
102 if not inheader and isheader(line, inheader):
102 if not inheader and isheader(line, inheader):
103 yield chunk(cur)
103 yield chunk(cur)
104 cur = []
104 cur = []
105 inheader = True
105 inheader = True
106 if inheader and not isheader(line, inheader):
106 if inheader and not isheader(line, inheader):
107 inheader = False
107 inheader = False
108
108
109 cur.append(line)
109 cur.append(line)
110
110
111 if cur:
111 if cur:
112 yield chunk(cur)
112 yield chunk(cur)
113
113
114 def remainder(cur):
114 def remainder(cur):
115 yield chunk(cur)
115 yield chunk(cur)
116
116
117 class fiter(object):
117 class fiter(object):
118 def __init__(self, fp):
118 def __init__(self, fp):
119 self.fp = fp
119 self.fp = fp
120
120
121 def __iter__(self):
121 def __iter__(self):
122 return self
122 return self
123
123
124 def next(self):
124 def next(self):
125 l = self.fp.readline()
125 l = self.fp.readline()
126 if not l:
126 if not l:
127 raise StopIteration
127 raise StopIteration
128 return l
128 return l
129
129
130 inheader = False
130 inheader = False
131 cur = []
131 cur = []
132
132
133 mimeheaders = ['content-type']
133 mimeheaders = ['content-type']
134
134
135 if not util.safehasattr(stream, 'next'):
135 if not util.safehasattr(stream, 'next'):
136 # http responses, for example, have readline but not next
136 # http responses, for example, have readline but not next
137 stream = fiter(stream)
137 stream = fiter(stream)
138
138
139 for line in stream:
139 for line in stream:
140 cur.append(line)
140 cur.append(line)
141 if line.startswith('# HG changeset patch'):
141 if line.startswith('# HG changeset patch'):
142 return hgsplit(stream, cur)
142 return hgsplit(stream, cur)
143 elif line.startswith('From '):
143 elif line.startswith('From '):
144 return mboxsplit(stream, cur)
144 return mboxsplit(stream, cur)
145 elif isheader(line, inheader):
145 elif isheader(line, inheader):
146 inheader = True
146 inheader = True
147 if line.split(':', 1)[0].lower() in mimeheaders:
147 if line.split(':', 1)[0].lower() in mimeheaders:
148 # let email parser handle this
148 # let email parser handle this
149 return mimesplit(stream, cur)
149 return mimesplit(stream, cur)
150 elif line.startswith('--- ') and inheader:
150 elif line.startswith('--- ') and inheader:
151 # No evil headers seen by diff start, split by hand
151 # No evil headers seen by diff start, split by hand
152 return headersplit(stream, cur)
152 return headersplit(stream, cur)
153 # Not enough info, keep reading
153 # Not enough info, keep reading
154
154
155 # if we are here, we have a very plain patch
155 # if we are here, we have a very plain patch
156 return remainder(cur)
156 return remainder(cur)
157
157
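split() above reads the stream line by line and, on the first recognisable marker, hands the accumulated prefix plus the rest of the stream to a specialised splitter: hg export header, mbox 'From ' separator, MIME headers, or a bare diff. The sketch below reproduces only the classification step over a list of lines, with a simplified header test (the real isheader() also excludes diff body lines); it is an illustration, not the actual dispatch code.

    def classify(lines):
        # Return the name of the splitter this input would be routed to.
        inheader = False
        for line in lines:
            if line.startswith('# HG changeset patch'):
                return 'hgsplit'
            elif line.startswith('From '):
                return 'mboxsplit'
            elif ': ' in line and ' ' not in line.split(': ', 1)[0]:
                inheader = True
                if line.split(':', 1)[0].lower() == 'content-type':
                    return 'mimesplit'
            elif line.startswith('--- ') and inheader:
                return 'headersplit'
        return 'remainder'

    assert classify(['# HG changeset patch\n']) == 'hgsplit'
    assert classify(['From alice@example.com\n']) == 'mboxsplit'
    assert classify(['--- a/f\n', '+++ b/f\n']) == 'remainder'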
158 def extract(ui, fileobj):
158 def extract(ui, fileobj):
159 '''extract patch from data read from fileobj.
159 '''extract patch from data read from fileobj.
160
160
161 patch can be a normal patch or contained in an email message.
161 patch can be a normal patch or contained in an email message.
162
162
163 return tuple (filename, message, user, date, branch, node, p1, p2).
163 return tuple (filename, message, user, date, branch, node, p1, p2).
164 Any item in the returned tuple can be None. If filename is None,
164 Any item in the returned tuple can be None. If filename is None,
165 fileobj did not contain a patch. Caller must unlink filename when done.'''
165 fileobj did not contain a patch. Caller must unlink filename when done.'''
166
166
167 # attempt to detect the start of a patch
167 # attempt to detect the start of a patch
168 # (this heuristic is borrowed from quilt)
168 # (this heuristic is borrowed from quilt)
169 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
169 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
170 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
170 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
171 r'---[ \t].*?^\+\+\+[ \t]|'
171 r'---[ \t].*?^\+\+\+[ \t]|'
172 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
172 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
173
173
174 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
174 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
175 tmpfp = os.fdopen(fd, 'w')
175 tmpfp = os.fdopen(fd, 'w')
176 try:
176 try:
177 msg = email.Parser.Parser().parse(fileobj)
177 msg = email.Parser.Parser().parse(fileobj)
178
178
179 subject = msg['Subject']
179 subject = msg['Subject']
180 user = msg['From']
180 user = msg['From']
181 if not subject and not user:
181 if not subject and not user:
182 # Not an email, restore parsed headers if any
182 # Not an email, restore parsed headers if any
183 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
183 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
184
184
185 # should try to parse msg['Date']
185 # should try to parse msg['Date']
186 date = None
186 date = None
187 nodeid = None
187 nodeid = None
188 branch = None
188 branch = None
189 parents = []
189 parents = []
190
190
191 if subject:
191 if subject:
192 if subject.startswith('[PATCH'):
192 if subject.startswith('[PATCH'):
193 pend = subject.find(']')
193 pend = subject.find(']')
194 if pend >= 0:
194 if pend >= 0:
195 subject = subject[pend + 1:].lstrip()
195 subject = subject[pend + 1:].lstrip()
196 subject = re.sub(r'\n[ \t]+', ' ', subject)
196 subject = re.sub(r'\n[ \t]+', ' ', subject)
197 ui.debug('Subject: %s\n' % subject)
197 ui.debug('Subject: %s\n' % subject)
198 if user:
198 if user:
199 ui.debug('From: %s\n' % user)
199 ui.debug('From: %s\n' % user)
200 diffs_seen = 0
200 diffs_seen = 0
201 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
201 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
202 message = ''
202 message = ''
203 for part in msg.walk():
203 for part in msg.walk():
204 content_type = part.get_content_type()
204 content_type = part.get_content_type()
205 ui.debug('Content-Type: %s\n' % content_type)
205 ui.debug('Content-Type: %s\n' % content_type)
206 if content_type not in ok_types:
206 if content_type not in ok_types:
207 continue
207 continue
208 payload = part.get_payload(decode=True)
208 payload = part.get_payload(decode=True)
209 m = diffre.search(payload)
209 m = diffre.search(payload)
210 if m:
210 if m:
211 hgpatch = False
211 hgpatch = False
212 hgpatchheader = False
212 hgpatchheader = False
213 ignoretext = False
213 ignoretext = False
214
214
215 ui.debug('found patch at byte %d\n' % m.start(0))
215 ui.debug('found patch at byte %d\n' % m.start(0))
216 diffs_seen += 1
216 diffs_seen += 1
217 cfp = cStringIO.StringIO()
217 cfp = cStringIO.StringIO()
218 for line in payload[:m.start(0)].splitlines():
218 for line in payload[:m.start(0)].splitlines():
219 if line.startswith('# HG changeset patch') and not hgpatch:
219 if line.startswith('# HG changeset patch') and not hgpatch:
220 ui.debug('patch generated by hg export\n')
220 ui.debug('patch generated by hg export\n')
221 hgpatch = True
221 hgpatch = True
222 hgpatchheader = True
222 hgpatchheader = True
223 # drop earlier commit message content
223 # drop earlier commit message content
224 cfp.seek(0)
224 cfp.seek(0)
225 cfp.truncate()
225 cfp.truncate()
226 subject = None
226 subject = None
227 elif hgpatchheader:
227 elif hgpatchheader:
228 if line.startswith('# User '):
228 if line.startswith('# User '):
229 user = line[7:]
229 user = line[7:]
230 ui.debug('From: %s\n' % user)
230 ui.debug('From: %s\n' % user)
231 elif line.startswith("# Date "):
231 elif line.startswith("# Date "):
232 date = line[7:]
232 date = line[7:]
233 elif line.startswith("# Branch "):
233 elif line.startswith("# Branch "):
234 branch = line[9:]
234 branch = line[9:]
235 elif line.startswith("# Node ID "):
235 elif line.startswith("# Node ID "):
236 nodeid = line[10:]
236 nodeid = line[10:]
237 elif line.startswith("# Parent "):
237 elif line.startswith("# Parent "):
238 parents.append(line[9:].lstrip())
238 parents.append(line[9:].lstrip())
239 elif not line.startswith("# "):
239 elif not line.startswith("# "):
240 hgpatchheader = False
240 hgpatchheader = False
241 elif line == '---':
241 elif line == '---':
242 ignoretext = True
242 ignoretext = True
243 if not hgpatchheader and not ignoretext:
243 if not hgpatchheader and not ignoretext:
244 cfp.write(line)
244 cfp.write(line)
245 cfp.write('\n')
245 cfp.write('\n')
246 message = cfp.getvalue()
246 message = cfp.getvalue()
247 if tmpfp:
247 if tmpfp:
248 tmpfp.write(payload)
248 tmpfp.write(payload)
249 if not payload.endswith('\n'):
249 if not payload.endswith('\n'):
250 tmpfp.write('\n')
250 tmpfp.write('\n')
251 elif not diffs_seen and message and content_type == 'text/plain':
251 elif not diffs_seen and message and content_type == 'text/plain':
252 message += '\n' + payload
252 message += '\n' + payload
253 except: # re-raises
253 except: # re-raises
254 tmpfp.close()
254 tmpfp.close()
255 os.unlink(tmpname)
255 os.unlink(tmpname)
256 raise
256 raise
257
257
258 if subject and not message.startswith(subject):
258 if subject and not message.startswith(subject):
259 message = '%s\n%s' % (subject, message)
259 message = '%s\n%s' % (subject, message)
260 tmpfp.close()
260 tmpfp.close()
261 if not diffs_seen:
261 if not diffs_seen:
262 os.unlink(tmpname)
262 os.unlink(tmpname)
263 return None, message, user, date, branch, None, None, None
263 return None, message, user, date, branch, None, None, None
264
264
265 if parents:
265 if parents:
266 p1 = parents.pop(0)
266 p1 = parents.pop(0)
267 else:
267 else:
268 p1 = None
268 p1 = None
269
269
270 if parents:
270 if parents:
271 p2 = parents.pop(0)
271 p2 = parents.pop(0)
272 else:
272 else:
273 p2 = None
273 p2 = None
274
274
275 return tmpname, message, user, date, branch, nodeid, p1, p2
275 return tmpname, message, user, date, branch, nodeid, p1, p2
276
276
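# Illustrative sketch (not part of the original module): how a caller such as
# an import-style command might consume extract(). The surrounding names are
# assumptions for the example only.
#
#   fp = open('incoming.patch')
#   tmpname, message, user, date, branch, node, p1, p2 = extract(ui, fp)
#   if tmpname is None:
#       pass        # fileobj contained no patch, only a possible message
#   else:
#       try:
#           pass    # apply the patch written to tmpname
#       finally:
#           os.unlink(tmpname)  # caller must unlink the temporary file
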
class patchmeta(object):
    """Patched file metadata

    'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
    or COPY. 'path' is patched file path. 'oldpath' is set to the
    origin file when 'op' is either COPY or RENAME, None otherwise. If
    file mode is changed, 'mode' is a tuple (islink, isexec) where
    'islink' is True if the file is a symlink and 'isexec' is True if
    the file is executable. Otherwise, 'mode' is None.
    """
    def __init__(self, path):
        self.path = path
        self.oldpath = None
        self.mode = None
        self.op = 'MODIFY'
        self.binary = False

    def setmode(self, mode):
        islink = mode & 020000
        isexec = mode & 0100
        self.mode = (islink, isexec)

    def copy(self):
        other = patchmeta(self.path)
        other.oldpath = self.oldpath
        other.mode = self.mode
        other.op = self.op
        other.binary = self.binary
        return other

    def _ispatchinga(self, afile):
        if afile == '/dev/null':
            return self.op == 'ADD'
        return afile == 'a/' + (self.oldpath or self.path)

    def _ispatchingb(self, bfile):
        if bfile == '/dev/null':
            return self.op == 'DELETE'
        return bfile == 'b/' + self.path

    def ispatching(self, afile, bfile):
        return self._ispatchinga(afile) and self._ispatchingb(bfile)

    def __repr__(self):
        return "<patchmeta %s %r>" % (self.op, self.path)

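# Illustrative sketch (assumption, not original code): the metadata recorded
# for a git-style rename roughly corresponds to
#
#   gp = patchmeta('new/name.txt')
#   gp.op = 'RENAME'
#   gp.oldpath = 'old/name.txt'
#   gp.ispatching('a/old/name.txt', 'b/new/name.txt')   # -> True
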
def readgitpatch(lr):
    """extract git-style metadata about patches from <patchname>"""

    # Filter patch for git information
    gp = None
    gitpatches = []
    for line in lr:
        line = line.rstrip(' \r\n')
        if line.startswith('diff --git a/'):
            m = gitre.match(line)
            if m:
                if gp:
                    gitpatches.append(gp)
                dst = m.group(2)
                gp = patchmeta(dst)
        elif gp:
            if line.startswith('--- '):
                gitpatches.append(gp)
                gp = None
                continue
            if line.startswith('rename from '):
                gp.op = 'RENAME'
                gp.oldpath = line[12:]
            elif line.startswith('rename to '):
                gp.path = line[10:]
            elif line.startswith('copy from '):
                gp.op = 'COPY'
                gp.oldpath = line[10:]
            elif line.startswith('copy to '):
                gp.path = line[8:]
            elif line.startswith('deleted file'):
                gp.op = 'DELETE'
            elif line.startswith('new file mode '):
                gp.op = 'ADD'
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('new mode '):
                gp.setmode(int(line[-6:], 8))
            elif line.startswith('GIT binary patch'):
                gp.binary = True
    if gp:
        gitpatches.append(gp)

    return gitpatches

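# Illustrative sketch (assumption, not original code): given a git-style diff
# such as
#
#   diff --git a/a.txt b/b.txt
#   rename from a.txt
#   rename to b.txt
#
# readgitpatch(linereader(fp)) returns a single patchmeta whose op is
# 'RENAME', whose path is 'b.txt' and whose oldpath is 'a.txt'.
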
class linereader(object):
    # simple class to allow pushing lines back into the input stream
    def __init__(self, fp):
        self.fp = fp
        self.buf = []

    def push(self, line):
        if line is not None:
            self.buf.append(line)

    def readline(self):
        if self.buf:
            l = self.buf[0]
            del self.buf[0]
            return l
        return self.fp.readline()

    def __iter__(self):
        while True:
            l = self.readline()
            if not l:
                break
            yield l

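# Illustrative sketch (assumption, not original code): the push-back buffer
# lets a parser peek at a line and return it to the stream.
#
#   lr = linereader(cStringIO.StringIO('one\ntwo\n'))
#   first = lr.readline()    # 'one\n'
#   lr.push(first)           # put it back
#   list(lr)                 # ['one\n', 'two\n']
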
class abstractbackend(object):
    def __init__(self, ui):
        self.ui = ui

    def getfile(self, fname):
        """Return target file data and flags as a (data, (islink,
        isexec)) tuple. Data is None if file is missing/deleted.
        """
        raise NotImplementedError

    def setfile(self, fname, data, mode, copysource):
        """Write data to target file fname and set its mode. mode is a
        (islink, isexec) tuple. If data is None, the file content should
        be left unchanged. If the file is modified after being copied,
        copysource is set to the original file name.
        """
        raise NotImplementedError

    def unlink(self, fname):
        """Unlink target file."""
        raise NotImplementedError

    def writerej(self, fname, failed, total, lines):
        """Write rejected lines for fname. 'failed' is the number of hunks
        which failed to apply and 'total' is the total number of hunks for
        this file.
        """
        pass

    def exists(self, fname):
        raise NotImplementedError

class fsbackend(abstractbackend):
    def __init__(self, ui, basedir):
        super(fsbackend, self).__init__(ui)
        self.opener = scmutil.opener(basedir)

    def _join(self, f):
        return os.path.join(self.opener.base, f)

    def getfile(self, fname):
        if self.opener.islink(fname):
            return (self.opener.readlink(fname), (True, False))

        isexec = False
        try:
            isexec = self.opener.lstat(fname).st_mode & 0100 != 0
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise
        try:
            return (self.opener.read(fname), (False, isexec))
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            return None, None

    def setfile(self, fname, data, mode, copysource):
        islink, isexec = mode
        if data is None:
            self.opener.setflags(fname, islink, isexec)
            return
        if islink:
            self.opener.symlink(data, fname)
        else:
            self.opener.write(fname, data)
        if isexec:
            self.opener.setflags(fname, False, True)

    def unlink(self, fname):
        self.opener.unlinkpath(fname, ignoremissing=True)

    def writerej(self, fname, failed, total, lines):
        fname = fname + ".rej"
        self.ui.warn(
            _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
            (failed, total, fname))
        fp = self.opener(fname, 'w')
        fp.writelines(lines)
        fp.close()

    def exists(self, fname):
        return self.opener.lexists(fname)

class workingbackend(fsbackend):
    def __init__(self, ui, repo, similarity):
        super(workingbackend, self).__init__(ui, repo.root)
        self.repo = repo
        self.similarity = similarity
        self.removed = set()
        self.changed = set()
        self.copied = []

    def _checkknown(self, fname):
        if self.repo.dirstate[fname] == '?' and self.exists(fname):
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def setfile(self, fname, data, mode, copysource):
        self._checkknown(fname)
        super(workingbackend, self).setfile(fname, data, mode, copysource)
        if copysource is not None:
            self.copied.append((copysource, fname))
        self.changed.add(fname)

    def unlink(self, fname):
        self._checkknown(fname)
        super(workingbackend, self).unlink(fname)
        self.removed.add(fname)
        self.changed.add(fname)

    def close(self):
        wctx = self.repo[None]
        changed = set(self.changed)
        for src, dst in self.copied:
            scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
        if self.removed:
            wctx.forget(sorted(self.removed))
            for f in self.removed:
                if f not in self.repo.dirstate:
                    # File was deleted and no longer belongs to the
                    # dirstate, it was probably marked added then
                    # deleted, and should not be considered by
                    # marktouched().
                    changed.discard(f)
        if changed:
            scmutil.marktouched(self.repo, changed, self.similarity)
        return sorted(self.changed)

class filestore(object):
    def __init__(self, maxsize=None):
        self.opener = None
        self.files = {}
        self.created = 0
        self.maxsize = maxsize
        if self.maxsize is None:
            self.maxsize = 4*(2**20)
        self.size = 0
        self.data = {}

    def setfile(self, fname, data, mode, copied=None):
        if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
            self.data[fname] = (data, mode, copied)
            self.size += len(data)
        else:
            if self.opener is None:
                root = tempfile.mkdtemp(prefix='hg-patch-')
                self.opener = scmutil.opener(root)
            # Avoid filename issues with these simple names
            fn = str(self.created)
            self.opener.write(fn, data)
            self.created += 1
            self.files[fname] = (fn, mode, copied)

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        if not self.opener or fname not in self.files:
            return None, None, None
        fn, mode, copied = self.files[fname]
        return self.opener.read(fn), mode, copied

    def close(self):
        if self.opener:
            shutil.rmtree(self.opener.base)

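# Illustrative sketch (assumption, not original code): small entries stay in
# memory, anything past maxsize spills to a temporary directory.
#
#   store = filestore(maxsize=0)          # force every entry to disk
#   store.setfile('f.txt', 'data', (False, False))
#   data, mode, copied = store.getfile('f.txt')
#   store.close()                         # removes the temporary directory
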
class repobackend(abstractbackend):
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx
        self.store = store
        self.changed = set()
        self.removed = set()
        self.copied = {}

    def _checkknown(self, fname):
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        return self.changed | self.removed

# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
eolmodes = ['strict', 'crlf', 'lf', 'auto']

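# Illustrative sketch (assumption, not original code): what the two hunk
# descriptors match.
#
#   unidesc.match('@@ -1,5 +1,6 @@').groups()      # ('1', '5', '1', '6')
#   contextdesc.match('*** 1,4 ****').groups()     # ('1', '4')
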
class patchfile(object):
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}
        self.dirty = 0
        self.offset = 0
        self.skew = 0
        self.rej = []
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)


    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum

        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        for fuzzlen in xrange(3):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew into account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)

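# Illustrative note (assumption, not original code): patchfile.apply() returns
# 0 when a hunk applies cleanly, a positive fuzz level when it applied with
# fuzz, and -1 when the hunk is rejected; close() flushes the patched lines
# and returns the number of rejected hunks, e.g.
#
#   ret = pf.apply(somehunk)
#   rejects = pf.close()
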
class header(object):
    """patch header
    """
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        return any(h.startswith('index ') for h in self.header)

    def pretty(self, fp):
        for h in self.header:
            if h.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(h):
                fp.write(h)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if h.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(h)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        return any(self.allhunks_re.match(h) for h in self.header)

    def files(self):
        match = self.diffgit_re.match(self.header[0])
        if match:
            fromfile, tofile = match.groups()
            if fromfile == tofile:
                return [fromfile]
            return [fromfile, tofile]
        else:
            return self.diff_re.match(self.header[0]).groups()

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(h) for h in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level: for example, a file that has been deleted is a special file.
        # The user cannot change the content of the operation; in the case of
        # a deleted file they have to take the deletion or not take it, they
        # cannot take only some of it.
        # Newly added files are special if they are empty; they are not
        # special if they have some content, as we want to be able to change
        # it.
        nocontent = len(self.header) == 2
        emptynewfile = self.isnewfile() and nocontent
        return emptynewfile or \
                any(self.special_re.match(h) for h in self.header)

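# Illustrative sketch (assumption, not original code): header.files() and
# filename() are driven by the first header line.
#
#   h = header(['diff --git a/foo.c b/foo.c\n'])
#   h.files()       # ['foo.c']
#   h.filename()    # 'foo.c'
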
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(number, lines):
            delta = len(lines) - self.maxcontext
            if False and delta > 0:
                return number + delta, lines[:self.maxcontext]
            return number, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, v):
        if not isinstance(v, recordhunk):
            return False

        return ((v.hunk == self.hunk) and
                (v.proc == self.proc) and
                (self.fromline == v.fromline) and
                (self.header.files() == v.header.files()))

    def __hash__(self):
        return hash((tuple(self.hunk),
            tuple(self.header.files()),
            self.fromline,
            self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        add = len([h for h in hunk if h[0] == '+'])
        rem = len([h for h in hunk if h[0] == '-'])
        return add, rem

    def write(self, fp):
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)

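# Illustrative sketch (assumption, not original code): countchanges() simply
# tallies '+' and '-' lines in a hunk body.
#
#   body = ['+new line\n', '-old line\n', ' context\n']
#   # countchanges(body) returns (1, 1): one added, one removed line
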
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks"""
    if operation is None:
        operation = _('record')

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end
                # of http://mercurial.selenic.com/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ui.system("%s \"%s\"" % (editor, patchfn),
                              environ={'HGUSER': ui.username()},
                              onerr=util.Abort, errprefix=_("edit failed"))
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {}        # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                    skipall, msg, chunk)
            if r:
                if fixoffset:
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                fixoffset += chunk.removed - chunk.added
    return sum([h for h in applied.itervalues()
               if h[0].special() or len(h) > 1], [])
1105 class hunk(object):
1105 class hunk(object):
1106 def __init__(self, desc, num, lr, context):
1106 def __init__(self, desc, num, lr, context):
1107 self.number = num
1107 self.number = num
1108 self.desc = desc
1108 self.desc = desc
1109 self.hunk = [desc]
1109 self.hunk = [desc]
1110 self.a = []
1110 self.a = []
1111 self.b = []
1111 self.b = []
1112 self.starta = self.lena = None
1112 self.starta = self.lena = None
1113 self.startb = self.lenb = None
1113 self.startb = self.lenb = None
1114 if lr is not None:
1114 if lr is not None:
1115 if context:
1115 if context:
1116 self.read_context_hunk(lr)
1116 self.read_context_hunk(lr)
1117 else:
1117 else:
1118 self.read_unified_hunk(lr)
1118 self.read_unified_hunk(lr)
1119
1119
1120 def getnormalized(self):
1120 def getnormalized(self):
1121 """Return a copy with line endings normalized to LF."""
1121 """Return a copy with line endings normalized to LF."""
1122
1122
1123 def normalize(lines):
1123 def normalize(lines):
1124 nlines = []
1124 nlines = []
1125 for line in lines:
1125 for line in lines:
1126 if line.endswith('\r\n'):
1126 if line.endswith('\r\n'):
1127 line = line[:-2] + '\n'
1127 line = line[:-2] + '\n'
1128 nlines.append(line)
1128 nlines.append(line)
1129 return nlines
1129 return nlines
1130
1130
1131 # Dummy object, it is rebuilt manually
1131 # Dummy object, it is rebuilt manually
1132 nh = hunk(self.desc, self.number, None, None)
1132 nh = hunk(self.desc, self.number, None, None)
1133 nh.number = self.number
1133 nh.number = self.number
1134 nh.desc = self.desc
1134 nh.desc = self.desc
1135 nh.hunk = self.hunk
1135 nh.hunk = self.hunk
1136 nh.a = normalize(self.a)
1136 nh.a = normalize(self.a)
1137 nh.b = normalize(self.b)
1137 nh.b = normalize(self.b)
1138 nh.starta = self.starta
1138 nh.starta = self.starta
1139 nh.startb = self.startb
1139 nh.startb = self.startb
1140 nh.lena = self.lena
1140 nh.lena = self.lena
1141 nh.lenb = self.lenb
1141 nh.lenb = self.lenb
1142 return nh
1142 return nh
1143
1143
1144 def read_unified_hunk(self, lr):
1144 def read_unified_hunk(self, lr):
1145 m = unidesc.match(self.desc)
1145 m = unidesc.match(self.desc)
1146 if not m:
1146 if not m:
1147 raise PatchError(_("bad hunk #%d") % self.number)
1147 raise PatchError(_("bad hunk #%d") % self.number)
1148 self.starta, self.lena, self.startb, self.lenb = m.groups()
1148 self.starta, self.lena, self.startb, self.lenb = m.groups()
1149 if self.lena is None:
1149 if self.lena is None:
1150 self.lena = 1
1150 self.lena = 1
1151 else:
1151 else:
1152 self.lena = int(self.lena)
1152 self.lena = int(self.lena)
1153 if self.lenb is None:
1153 if self.lenb is None:
1154 self.lenb = 1
1154 self.lenb = 1
1155 else:
1155 else:
1156 self.lenb = int(self.lenb)
1156 self.lenb = int(self.lenb)
1157 self.starta = int(self.starta)
1157 self.starta = int(self.starta)
1158 self.startb = int(self.startb)
1158 self.startb = int(self.startb)
1159 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1159 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1160 self.b)
1160 self.b)
1161 # if we hit eof before finishing out the hunk, the last line will
1161 # if we hit eof before finishing out the hunk, the last line will
1162 # be zero length. Lets try to fix it up.
1162 # be zero length. Lets try to fix it up.
1163 while len(self.hunk[-1]) == 0:
1163 while len(self.hunk[-1]) == 0:
1164 del self.hunk[-1]
1164 del self.hunk[-1]
1165 del self.a[-1]
1165 del self.a[-1]
1166 del self.b[-1]
1166 del self.b[-1]
1167 self.lena -= 1
1167 self.lena -= 1
1168 self.lenb -= 1
1168 self.lenb -= 1
1169 self._fixnewline(lr)
1169 self._fixnewline(lr)
1170
1170
1171 def read_context_hunk(self, lr):
1171 def read_context_hunk(self, lr):
1172 self.desc = lr.readline()
1172 self.desc = lr.readline()
1173 m = contextdesc.match(self.desc)
1173 m = contextdesc.match(self.desc)
1174 if not m:
1174 if not m:
1175 raise PatchError(_("bad hunk #%d") % self.number)
1175 raise PatchError(_("bad hunk #%d") % self.number)
1176 self.starta, aend = m.groups()
1176 self.starta, aend = m.groups()
1177 self.starta = int(self.starta)
1177 self.starta = int(self.starta)
1178 if aend is None:
1178 if aend is None:
1179 aend = self.starta
1179 aend = self.starta
1180 self.lena = int(aend) - self.starta
1180 self.lena = int(aend) - self.starta
1181 if self.starta:
1181 if self.starta:
1182 self.lena += 1
1182 self.lena += 1
1183 for x in xrange(self.lena):
1183 for x in xrange(self.lena):
1184 l = lr.readline()
1184 l = lr.readline()
1185 if l.startswith('---'):
1185 if l.startswith('---'):
1186 # lines addition, old block is empty
1186 # lines addition, old block is empty
1187 lr.push(l)
1187 lr.push(l)
1188 break
1188 break
1189 s = l[2:]
1189 s = l[2:]
1190 if l.startswith('- ') or l.startswith('! '):
1190 if l.startswith('- ') or l.startswith('! '):
1191 u = '-' + s
1191 u = '-' + s
1192 elif l.startswith(' '):
1192 elif l.startswith(' '):
1193 u = ' ' + s
1193 u = ' ' + s
1194 else:
1194 else:
1195 raise PatchError(_("bad hunk #%d old text line %d") %
1195 raise PatchError(_("bad hunk #%d old text line %d") %
1196 (self.number, x))
1196 (self.number, x))
1197 self.a.append(u)
1197 self.a.append(u)
1198 self.hunk.append(u)
1198 self.hunk.append(u)
1199
1199
1200 l = lr.readline()
1200 l = lr.readline()
1201 if l.startswith('\ '):
1201 if l.startswith('\ '):
1202 s = self.a[-1][:-1]
1202 s = self.a[-1][:-1]
1203 self.a[-1] = s
1203 self.a[-1] = s
1204 self.hunk[-1] = s
1204 self.hunk[-1] = s
1205 l = lr.readline()
1205 l = lr.readline()
1206 m = contextdesc.match(l)
1206 m = contextdesc.match(l)
1207 if not m:
1207 if not m:
1208 raise PatchError(_("bad hunk #%d") % self.number)
1208 raise PatchError(_("bad hunk #%d") % self.number)
1209 self.startb, bend = m.groups()
1209 self.startb, bend = m.groups()
1210 self.startb = int(self.startb)
1210 self.startb = int(self.startb)
1211 if bend is None:
1211 if bend is None:
1212 bend = self.startb
1212 bend = self.startb
1213 self.lenb = int(bend) - self.startb
1213 self.lenb = int(bend) - self.startb
1214 if self.startb:
1214 if self.startb:
1215 self.lenb += 1
1215 self.lenb += 1
1216 hunki = 1
1216 hunki = 1
1217 for x in xrange(self.lenb):
1217 for x in xrange(self.lenb):
1218 l = lr.readline()
1218 l = lr.readline()
1219 if l.startswith('\ '):
1219 if l.startswith('\ '):
1220 # XXX: the only way to hit this is with an invalid line range.
1220 # XXX: the only way to hit this is with an invalid line range.
1221 # The no-eol marker is not counted in the line range, but there
1221 # The no-eol marker is not counted in the line range, but there
1222 # seem to be diff(1) implementations out there that behave differently.
1222 # seem to be diff(1) implementations out there that behave differently.
1223 s = self.b[-1][:-1]
1223 s = self.b[-1][:-1]
1224 self.b[-1] = s
1224 self.b[-1] = s
1225 self.hunk[hunki - 1] = s
1225 self.hunk[hunki - 1] = s
1226 continue
1226 continue
1227 if not l:
1227 if not l:
1228 # line deletions, new block is empty and we hit EOF
1228 # line deletions, new block is empty and we hit EOF
1229 lr.push(l)
1229 lr.push(l)
1230 break
1230 break
1231 s = l[2:]
1231 s = l[2:]
1232 if l.startswith('+ ') or l.startswith('! '):
1232 if l.startswith('+ ') or l.startswith('! '):
1233 u = '+' + s
1233 u = '+' + s
1234 elif l.startswith(' '):
1234 elif l.startswith(' '):
1235 u = ' ' + s
1235 u = ' ' + s
1236 elif len(self.b) == 0:
1236 elif len(self.b) == 0:
1237 # line deletions, new block is empty
1237 # line deletions, new block is empty
1238 lr.push(l)
1238 lr.push(l)
1239 break
1239 break
1240 else:
1240 else:
1241 raise PatchError(_("bad hunk #%d old text line %d") %
1241 raise PatchError(_("bad hunk #%d old text line %d") %
1242 (self.number, x))
1242 (self.number, x))
1243 self.b.append(s)
1243 self.b.append(s)
1244 while True:
1244 while True:
1245 if hunki >= len(self.hunk):
1245 if hunki >= len(self.hunk):
1246 h = ""
1246 h = ""
1247 else:
1247 else:
1248 h = self.hunk[hunki]
1248 h = self.hunk[hunki]
1249 hunki += 1
1249 hunki += 1
1250 if h == u:
1250 if h == u:
1251 break
1251 break
1252 elif h.startswith('-'):
1252 elif h.startswith('-'):
1253 continue
1253 continue
1254 else:
1254 else:
1255 self.hunk.insert(hunki - 1, u)
1255 self.hunk.insert(hunki - 1, u)
1256 break
1256 break
1257
1257
1258 if not self.a:
1258 if not self.a:
1259 # this happens when lines were only added to the hunk
1259 # this happens when lines were only added to the hunk
1260 for x in self.hunk:
1260 for x in self.hunk:
1261 if x.startswith('-') or x.startswith(' '):
1261 if x.startswith('-') or x.startswith(' '):
1262 self.a.append(x)
1262 self.a.append(x)
1263 if not self.b:
1263 if not self.b:
1264 # this happens when lines were only deleted from the hunk
1264 # this happens when lines were only deleted from the hunk
1265 for x in self.hunk:
1265 for x in self.hunk:
1266 if x.startswith('+') or x.startswith(' '):
1266 if x.startswith('+') or x.startswith(' '):
1267 self.b.append(x[1:])
1267 self.b.append(x[1:])
1268 # @@ -start,len +start,len @@
1268 # @@ -start,len +start,len @@
1269 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1269 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1270 self.startb, self.lenb)
1270 self.startb, self.lenb)
1271 self.hunk[0] = self.desc
1271 self.hunk[0] = self.desc
1272 self._fixnewline(lr)
1272 self._fixnewline(lr)
1273
1273
1274 def _fixnewline(self, lr):
1274 def _fixnewline(self, lr):
1275 l = lr.readline()
1275 l = lr.readline()
1276 if l.startswith('\ '):
1276 if l.startswith('\ '):
1277 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1277 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1278 else:
1278 else:
1279 lr.push(l)
1279 lr.push(l)
1280
1280
1281 def complete(self):
1281 def complete(self):
1282 return len(self.a) == self.lena and len(self.b) == self.lenb
1282 return len(self.a) == self.lena and len(self.b) == self.lenb
1283
1283
1284 def _fuzzit(self, old, new, fuzz, toponly):
1284 def _fuzzit(self, old, new, fuzz, toponly):
1285 # this removes context lines from the top and bottom of the 'old' and
1285 # this removes context lines from the top and bottom of the 'old' and
1286 # 'new' lists. It checks the hunk to make sure only context lines are
1286 # 'new' lists. It checks the hunk to make sure only context lines are
1287 # removed, and then returns the shortened lists and the top offset.
1287 # removed, and then returns the shortened lists and the top offset.
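# Editor's illustration (assumed example, not part of the original source):
# with fuzz=2 and a hunk body of ' a', ' b', '-x', '+y', ' c', only the two
# leading context lines and the single trailing one are candidates for
# removal; _fuzzit trims min(fuzz, 2) = 2 lines from the top and
# min(fuzz, 1) = 1 from the bottom, and returns top=2 so that fuzzit() can
# shift the reported start lines accordingly.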
1288 fuzz = min(fuzz, len(old))
1288 fuzz = min(fuzz, len(old))
1289 if fuzz:
1289 if fuzz:
1290 top = 0
1290 top = 0
1291 bot = 0
1291 bot = 0
1292 hlen = len(self.hunk)
1292 hlen = len(self.hunk)
1293 for x in xrange(hlen - 1):
1293 for x in xrange(hlen - 1):
1294 # the hunk starts with the @@ line, so use x+1
1294 # the hunk starts with the @@ line, so use x+1
1295 if self.hunk[x + 1][0] == ' ':
1295 if self.hunk[x + 1][0] == ' ':
1296 top += 1
1296 top += 1
1297 else:
1297 else:
1298 break
1298 break
1299 if not toponly:
1299 if not toponly:
1300 for x in xrange(hlen - 1):
1300 for x in xrange(hlen - 1):
1301 if self.hunk[hlen - bot - 1][0] == ' ':
1301 if self.hunk[hlen - bot - 1][0] == ' ':
1302 bot += 1
1302 bot += 1
1303 else:
1303 else:
1304 break
1304 break
1305
1305
1306 bot = min(fuzz, bot)
1306 bot = min(fuzz, bot)
1307 top = min(fuzz, top)
1307 top = min(fuzz, top)
1308 return old[top:len(old) - bot], new[top:len(new) - bot], top
1308 return old[top:len(old) - bot], new[top:len(new) - bot], top
1309 return old, new, 0
1309 return old, new, 0
1310
1310
1311 def fuzzit(self, fuzz, toponly):
1311 def fuzzit(self, fuzz, toponly):
1312 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1312 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1313 oldstart = self.starta + top
1313 oldstart = self.starta + top
1314 newstart = self.startb + top
1314 newstart = self.startb + top
1315 # zero length hunk ranges already have their start decremented
1315 # zero length hunk ranges already have their start decremented
1316 if self.lena and oldstart > 0:
1316 if self.lena and oldstart > 0:
1317 oldstart -= 1
1317 oldstart -= 1
1318 if self.lenb and newstart > 0:
1318 if self.lenb and newstart > 0:
1319 newstart -= 1
1319 newstart -= 1
1320 return old, oldstart, new, newstart
1320 return old, oldstart, new, newstart
1321
1321
1322 class binhunk(object):
1322 class binhunk(object):
1323 'A binary patch file.'
1323 'A binary patch file.'
1324 def __init__(self, lr, fname):
1324 def __init__(self, lr, fname):
1325 self.text = None
1325 self.text = None
1326 self.delta = False
1326 self.delta = False
1327 self.hunk = ['GIT binary patch\n']
1327 self.hunk = ['GIT binary patch\n']
1328 self._fname = fname
1328 self._fname = fname
1329 self._read(lr)
1329 self._read(lr)
1330
1330
1331 def complete(self):
1331 def complete(self):
1332 return self.text is not None
1332 return self.text is not None
1333
1333
1334 def new(self, lines):
1334 def new(self, lines):
1335 if self.delta:
1335 if self.delta:
1336 return [applybindelta(self.text, ''.join(lines))]
1336 return [applybindelta(self.text, ''.join(lines))]
1337 return [self.text]
1337 return [self.text]
1338
1338
1339 def _read(self, lr):
1339 def _read(self, lr):
1340 def getline(lr, hunk):
1340 def getline(lr, hunk):
1341 l = lr.readline()
1341 l = lr.readline()
1342 hunk.append(l)
1342 hunk.append(l)
1343 return l.rstrip('\r\n')
1343 return l.rstrip('\r\n')
1344
1344
1345 size = 0
1345 size = 0
1346 while True:
1346 while True:
1347 line = getline(lr, self.hunk)
1347 line = getline(lr, self.hunk)
1348 if not line:
1348 if not line:
1349 raise PatchError(_('could not extract "%s" binary data')
1349 raise PatchError(_('could not extract "%s" binary data')
1350 % self._fname)
1350 % self._fname)
1351 if line.startswith('literal '):
1351 if line.startswith('literal '):
1352 size = int(line[8:].rstrip())
1352 size = int(line[8:].rstrip())
1353 break
1353 break
1354 if line.startswith('delta '):
1354 if line.startswith('delta '):
1355 size = int(line[6:].rstrip())
1355 size = int(line[6:].rstrip())
1356 self.delta = True
1356 self.delta = True
1357 break
1357 break
1358 dec = []
1358 dec = []
1359 line = getline(lr, self.hunk)
1359 line = getline(lr, self.hunk)
1360 while len(line) > 1:
1360 while len(line) > 1:
1361 l = line[0]
1361 l = line[0]
1362 if l <= 'Z' and l >= 'A':
1362 if l <= 'Z' and l >= 'A':
1363 l = ord(l) - ord('A') + 1
1363 l = ord(l) - ord('A') + 1
1364 else:
1364 else:
1365 l = ord(l) - ord('a') + 27
1365 l = ord(l) - ord('a') + 27
1366 try:
1366 try:
1367 dec.append(base85.b85decode(line[1:])[:l])
1367 dec.append(base85.b85decode(line[1:])[:l])
1368 except ValueError, e:
1368 except ValueError, e:
1369 raise PatchError(_('could not decode "%s" binary patch: %s')
1369 raise PatchError(_('could not decode "%s" binary patch: %s')
1370 % (self._fname, str(e)))
1370 % (self._fname, str(e)))
1371 line = getline(lr, self.hunk)
1371 line = getline(lr, self.hunk)
1372 text = zlib.decompress(''.join(dec))
1372 text = zlib.decompress(''.join(dec))
1373 if len(text) != size:
1373 if len(text) != size:
1374 raise PatchError(_('"%s" length is %d bytes, should be %d')
1374 raise PatchError(_('"%s" length is %d bytes, should be %d')
1375 % (self._fname, len(text), size))
1375 % (self._fname, len(text), size))
1376 self.text = text
1376 self.text = text
1377
1377
1378 def parsefilename(str):
1378 def parsefilename(str):
1379 # --- filename \t|space stuff
1379 # --- filename \t|space stuff
1380 s = str[4:].rstrip('\r\n')
1380 s = str[4:].rstrip('\r\n')
1381 i = s.find('\t')
1381 i = s.find('\t')
1382 if i < 0:
1382 if i < 0:
1383 i = s.find(' ')
1383 i = s.find(' ')
1384 if i < 0:
1384 if i < 0:
1385 return s
1385 return s
1386 return s[:i]
1386 return s[:i]
1387
1387
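# Editor's note on parsefilename() above (illustrative, not from the
# original source):
#     parsefilename('--- a/foo.c\t2015-05-12 10:00:00')  ->  'a/foo.c'
#     parsefilename('+++ b/foo.c')                        ->  'b/foo.c'
# The leading '--- ' or '+++ ' is dropped and the name is cut at the first
# tab or, failing that, at the first space.
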
1388 def reversehunks(hunks):
1389 '''reverse the signs in the hunks given as argument
1390
1391 This function operates on hunks coming out of patch.filterpatch, that is
1392 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1393
1394 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1395 ... --- a/folder1/g
1396 ... +++ b/folder1/g
1397 ... @@ -1,7 +1,7 @@
1398 ... +firstline
1399 ... c
1400 ... 1
1401 ... 2
1402 ... + 3
1403 ... -4
1404 ... 5
1405 ... d
1406 ... +lastline"""
1407 >>> hunks = parsepatch(rawpatch)
1408 >>> hunkscomingfromfilterpatch = []
1409 >>> for h in hunks:
1410 ... hunkscomingfromfilterpatch.append(h)
1411 ... hunkscomingfromfilterpatch.extend(h.hunks)
1412
1413 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1414 >>> fp = cStringIO.StringIO()
1415 >>> for c in reversedhunks:
1416 ... c.write(fp)
1417 >>> fp.seek(0)
1418 >>> reversedpatch = fp.read()
1419 >>> print reversedpatch
1420 diff --git a/folder1/g b/folder1/g
1421 --- a/folder1/g
1422 +++ b/folder1/g
1423 @@ -1,4 +1,3 @@
1424 -firstline
1425 c
1426 1
1427 2
1428 @@ -1,6 +2,6 @@
1429 c
1430 1
1431 2
1432 - 3
1433 +4
1434 5
1435 d
1436 @@ -5,3 +6,2 @@
1437 5
1438 d
1439 -lastline
1440
1441 '''
1442
1443 import crecord as crecordmod
1444 newhunks = []
1445 for c in hunks:
1446 if isinstance(c, crecordmod.uihunk):
1447 # curses hunks encapsulate the record hunk in _hunk
1448 c = c._hunk
1449 if isinstance(c, recordhunk):
1450 for j, line in enumerate(c.hunk):
1451 if line.startswith("-"):
1452 c.hunk[j] = "+" + c.hunk[j][1:]
1453 elif line.startswith("+"):
1454 c.hunk[j] = "-" + c.hunk[j][1:]
1455 c.added, c.removed = c.removed, c.added
1456 newhunks.append(c)
1457 return newhunks
1458
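# A minimal, self-contained illustration of the sign-flipping rule used by
# reversehunks() above (hypothetical helper, not part of this module):
# swapping the '-' and '+' prefixes turns a forward hunk body into the hunk
# that undoes it; reversehunks() additionally swaps the added/removed
# counters on each record hunk.
def _flipsigns(lines):
    flipped = []
    for line in lines:
        if line.startswith('-'):
            flipped.append('+' + line[1:])
        elif line.startswith('+'):
            flipped.append('-' + line[1:])
        else:
            flipped.append(line)
    return flipped

# _flipsigns([' ctx', '-old', '+new']) == [' ctx', '+old', '-new']
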
1388 def parsepatch(originalchunks):
1459 def parsepatch(originalchunks):
1389 """patch -> [] of headers -> [] of hunks """
1460 """patch -> [] of headers -> [] of hunks """
1390 class parser(object):
1461 class parser(object):
1391 """patch parsing state machine"""
1462 """patch parsing state machine"""
1392 def __init__(self):
1463 def __init__(self):
1393 self.fromline = 0
1464 self.fromline = 0
1394 self.toline = 0
1465 self.toline = 0
1395 self.proc = ''
1466 self.proc = ''
1396 self.header = None
1467 self.header = None
1397 self.context = []
1468 self.context = []
1398 self.before = []
1469 self.before = []
1399 self.hunk = []
1470 self.hunk = []
1400 self.headers = []
1471 self.headers = []
1401
1472
1402 def addrange(self, limits):
1473 def addrange(self, limits):
1403 fromstart, fromend, tostart, toend, proc = limits
1474 fromstart, fromend, tostart, toend, proc = limits
1404 self.fromline = int(fromstart)
1475 self.fromline = int(fromstart)
1405 self.toline = int(tostart)
1476 self.toline = int(tostart)
1406 self.proc = proc
1477 self.proc = proc
1407
1478
1408 def addcontext(self, context):
1479 def addcontext(self, context):
1409 if self.hunk:
1480 if self.hunk:
1410 h = recordhunk(self.header, self.fromline, self.toline,
1481 h = recordhunk(self.header, self.fromline, self.toline,
1411 self.proc, self.before, self.hunk, context)
1482 self.proc, self.before, self.hunk, context)
1412 self.header.hunks.append(h)
1483 self.header.hunks.append(h)
1413 self.fromline += len(self.before) + h.removed
1484 self.fromline += len(self.before) + h.removed
1414 self.toline += len(self.before) + h.added
1485 self.toline += len(self.before) + h.added
1415 self.before = []
1486 self.before = []
1416 self.hunk = []
1487 self.hunk = []
1417 self.proc = ''
1488 self.proc = ''
1418 self.context = context
1489 self.context = context
1419
1490
1420 def addhunk(self, hunk):
1491 def addhunk(self, hunk):
1421 if self.context:
1492 if self.context:
1422 self.before = self.context
1493 self.before = self.context
1423 self.context = []
1494 self.context = []
1424 self.hunk = hunk
1495 self.hunk = hunk
1425
1496
1426 def newfile(self, hdr):
1497 def newfile(self, hdr):
1427 self.addcontext([])
1498 self.addcontext([])
1428 h = header(hdr)
1499 h = header(hdr)
1429 self.headers.append(h)
1500 self.headers.append(h)
1430 self.header = h
1501 self.header = h
1431
1502
1432 def addother(self, line):
1503 def addother(self, line):
1433 pass # 'other' lines are ignored
1504 pass # 'other' lines are ignored
1434
1505
1435 def finished(self):
1506 def finished(self):
1436 self.addcontext([])
1507 self.addcontext([])
1437 return self.headers
1508 return self.headers
1438
1509
1439 transitions = {
1510 transitions = {
1440 'file': {'context': addcontext,
1511 'file': {'context': addcontext,
1441 'file': newfile,
1512 'file': newfile,
1442 'hunk': addhunk,
1513 'hunk': addhunk,
1443 'range': addrange},
1514 'range': addrange},
1444 'context': {'file': newfile,
1515 'context': {'file': newfile,
1445 'hunk': addhunk,
1516 'hunk': addhunk,
1446 'range': addrange,
1517 'range': addrange,
1447 'other': addother},
1518 'other': addother},
1448 'hunk': {'context': addcontext,
1519 'hunk': {'context': addcontext,
1449 'file': newfile,
1520 'file': newfile,
1450 'range': addrange},
1521 'range': addrange},
1451 'range': {'context': addcontext,
1522 'range': {'context': addcontext,
1452 'hunk': addhunk},
1523 'hunk': addhunk},
1453 'other': {'other': addother},
1524 'other': {'other': addother},
1454 }
1525 }
1455
1526
1456 p = parser()
1527 p = parser()
1457 fp = cStringIO.StringIO()
1528 fp = cStringIO.StringIO()
1458 fp.write(''.join(originalchunks))
1529 fp.write(''.join(originalchunks))
1459 fp.seek(0)
1530 fp.seek(0)
1460
1531
1461 state = 'context'
1532 state = 'context'
1462 for newstate, data in scanpatch(fp):
1533 for newstate, data in scanpatch(fp):
1463 try:
1534 try:
1464 p.transitions[state][newstate](p, data)
1535 p.transitions[state][newstate](p, data)
1465 except KeyError:
1536 except KeyError:
1466 raise PatchError('unhandled transition: %s -> %s' %
1537 raise PatchError('unhandled transition: %s -> %s' %
1467 (state, newstate))
1538 (state, newstate))
1468 state = newstate
1539 state = newstate
1469 del fp
1540 del fp
1470 return p.finished()
1541 return p.finished()
1471
1542
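# Editor's walkthrough of the parser above (not part of the original
# source). For a typical single-file patch, scanpatch() emits events such
# as 'file', 'range', 'context', 'hunk', ... and the state machine reacts
# as follows, starting from state 'context':
#     'file'    -> newfile()     open a new header object
#     'range'   -> addrange()    record the @@ -from,+to line numbers
#     'context' -> addcontext()  flush any buffered hunk, keep the context
#     'hunk'    -> addhunk()     buffer the +/- lines of the current hunk
# finished() calls addcontext([]) one last time so a trailing hunk with no
# context after it is not lost.
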
1472 def pathtransform(path, strip, prefix):
1543 def pathtransform(path, strip, prefix):
1473 '''turn a path from a patch into a path suitable for the repository
1544 '''turn a path from a patch into a path suitable for the repository
1474
1545
1475 prefix, if not empty, is expected to be normalized with a / at the end.
1546 prefix, if not empty, is expected to be normalized with a / at the end.
1476
1547
1477 Returns (stripped components, path in repository).
1548 Returns (stripped components, path in repository).
1478
1549
1479 >>> pathtransform('a/b/c', 0, '')
1550 >>> pathtransform('a/b/c', 0, '')
1480 ('', 'a/b/c')
1551 ('', 'a/b/c')
1481 >>> pathtransform(' a/b/c ', 0, '')
1552 >>> pathtransform(' a/b/c ', 0, '')
1482 ('', ' a/b/c')
1553 ('', ' a/b/c')
1483 >>> pathtransform(' a/b/c ', 2, '')
1554 >>> pathtransform(' a/b/c ', 2, '')
1484 ('a/b/', 'c')
1555 ('a/b/', 'c')
1485 >>> pathtransform('a/b/c', 0, 'd/e/')
1556 >>> pathtransform('a/b/c', 0, 'd/e/')
1486 ('', 'd/e/a/b/c')
1557 ('', 'd/e/a/b/c')
1487 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1558 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1488 ('a//b/', 'd/e/c')
1559 ('a//b/', 'd/e/c')
1489 >>> pathtransform('a/b/c', 3, '')
1560 >>> pathtransform('a/b/c', 3, '')
1490 Traceback (most recent call last):
1561 Traceback (most recent call last):
1491 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1562 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1492 '''
1563 '''
1493 pathlen = len(path)
1564 pathlen = len(path)
1494 i = 0
1565 i = 0
1495 if strip == 0:
1566 if strip == 0:
1496 return '', prefix + path.rstrip()
1567 return '', prefix + path.rstrip()
1497 count = strip
1568 count = strip
1498 while count > 0:
1569 while count > 0:
1499 i = path.find('/', i)
1570 i = path.find('/', i)
1500 if i == -1:
1571 if i == -1:
1501 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1572 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1502 (count, strip, path))
1573 (count, strip, path))
1503 i += 1
1574 i += 1
1504 # consume '//' in the path
1575 # consume '//' in the path
1505 while i < pathlen - 1 and path[i] == '/':
1576 while i < pathlen - 1 and path[i] == '/':
1506 i += 1
1577 i += 1
1507 count -= 1
1578 count -= 1
1508 return path[:i].lstrip(), prefix + path[i:].rstrip()
1579 return path[:i].lstrip(), prefix + path[i:].rstrip()
1509
1580
1510 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1581 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1511 nulla = afile_orig == "/dev/null"
1582 nulla = afile_orig == "/dev/null"
1512 nullb = bfile_orig == "/dev/null"
1583 nullb = bfile_orig == "/dev/null"
1513 create = nulla and hunk.starta == 0 and hunk.lena == 0
1584 create = nulla and hunk.starta == 0 and hunk.lena == 0
1514 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1585 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1515 abase, afile = pathtransform(afile_orig, strip, prefix)
1586 abase, afile = pathtransform(afile_orig, strip, prefix)
1516 gooda = not nulla and backend.exists(afile)
1587 gooda = not nulla and backend.exists(afile)
1517 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1588 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1518 if afile == bfile:
1589 if afile == bfile:
1519 goodb = gooda
1590 goodb = gooda
1520 else:
1591 else:
1521 goodb = not nullb and backend.exists(bfile)
1592 goodb = not nullb and backend.exists(bfile)
1522 missing = not goodb and not gooda and not create
1593 missing = not goodb and not gooda and not create
1523
1594
1524 # some diff programs apparently produce patches where the afile is
1595 # some diff programs apparently produce patches where the afile is
1525 # not /dev/null, but afile starts with bfile
1596 # not /dev/null, but afile starts with bfile
1526 abasedir = afile[:afile.rfind('/') + 1]
1597 abasedir = afile[:afile.rfind('/') + 1]
1527 bbasedir = bfile[:bfile.rfind('/') + 1]
1598 bbasedir = bfile[:bfile.rfind('/') + 1]
1528 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1599 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1529 and hunk.starta == 0 and hunk.lena == 0):
1600 and hunk.starta == 0 and hunk.lena == 0):
1530 create = True
1601 create = True
1531 missing = False
1602 missing = False
1532
1603
1533 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1604 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1534 # diff is between a file and its backup. In this case, the original
1605 # diff is between a file and its backup. In this case, the original
1535 # file should be patched (see original mpatch code).
1606 # file should be patched (see original mpatch code).
1536 isbackup = (abase == bbase and bfile.startswith(afile))
1607 isbackup = (abase == bbase and bfile.startswith(afile))
1537 fname = None
1608 fname = None
1538 if not missing:
1609 if not missing:
1539 if gooda and goodb:
1610 if gooda and goodb:
1540 if isbackup:
1611 if isbackup:
1541 fname = afile
1612 fname = afile
1542 else:
1613 else:
1543 fname = bfile
1614 fname = bfile
1544 elif gooda:
1615 elif gooda:
1545 fname = afile
1616 fname = afile
1546
1617
1547 if not fname:
1618 if not fname:
1548 if not nullb:
1619 if not nullb:
1549 if isbackup:
1620 if isbackup:
1550 fname = afile
1621 fname = afile
1551 else:
1622 else:
1552 fname = bfile
1623 fname = bfile
1553 elif not nulla:
1624 elif not nulla:
1554 fname = afile
1625 fname = afile
1555 else:
1626 else:
1556 raise PatchError(_("undefined source and destination files"))
1627 raise PatchError(_("undefined source and destination files"))
1557
1628
1558 gp = patchmeta(fname)
1629 gp = patchmeta(fname)
1559 if create:
1630 if create:
1560 gp.op = 'ADD'
1631 gp.op = 'ADD'
1561 elif remove:
1632 elif remove:
1562 gp.op = 'DELETE'
1633 gp.op = 'DELETE'
1563 return gp
1634 return gp
1564
1635
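# Concrete cases for makepatchmeta() above (editor's illustration, not part
# of the original source):
#   - afile '/dev/null' with a hunk header '@@ -0,0 +1,3 @@' marks a file
#     creation, so gp.op becomes 'ADD';
#   - bfile '/dev/null' with '@@ -1,3 +0,0 @@' marks a removal, so gp.op
#     becomes 'DELETE';
#   - in every other case gp.op keeps its default and the chosen fname is
#     patched in place.
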
1565 def scanpatch(fp):
1636 def scanpatch(fp):
1566 """like patch.iterhunks, but yield different events
1637 """like patch.iterhunks, but yield different events
1567
1638
1568 - ('file', [header_lines + fromfile + tofile])
1639 - ('file', [header_lines + fromfile + tofile])
1569 - ('context', [context_lines])
1640 - ('context', [context_lines])
1570 - ('hunk', [hunk_lines])
1641 - ('hunk', [hunk_lines])
1571 - ('range', (-start,len, +start,len, proc))
1642 - ('range', (-start,len, +start,len, proc))
1572 """
1643 """
1573 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1644 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1574 lr = linereader(fp)
1645 lr = linereader(fp)
1575
1646
1576 def scanwhile(first, p):
1647 def scanwhile(first, p):
1577 """scan lr while predicate holds"""
1648 """scan lr while predicate holds"""
1578 lines = [first]
1649 lines = [first]
1579 while True:
1650 while True:
1580 line = lr.readline()
1651 line = lr.readline()
1581 if not line:
1652 if not line:
1582 break
1653 break
1583 if p(line):
1654 if p(line):
1584 lines.append(line)
1655 lines.append(line)
1585 else:
1656 else:
1586 lr.push(line)
1657 lr.push(line)
1587 break
1658 break
1588 return lines
1659 return lines
1589
1660
1590 while True:
1661 while True:
1591 line = lr.readline()
1662 line = lr.readline()
1592 if not line:
1663 if not line:
1593 break
1664 break
1594 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1665 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1595 def notheader(line):
1666 def notheader(line):
1596 s = line.split(None, 1)
1667 s = line.split(None, 1)
1597 return not s or s[0] not in ('---', 'diff')
1668 return not s or s[0] not in ('---', 'diff')
1598 header = scanwhile(line, notheader)
1669 header = scanwhile(line, notheader)
1599 fromfile = lr.readline()
1670 fromfile = lr.readline()
1600 if fromfile.startswith('---'):
1671 if fromfile.startswith('---'):
1601 tofile = lr.readline()
1672 tofile = lr.readline()
1602 header += [fromfile, tofile]
1673 header += [fromfile, tofile]
1603 else:
1674 else:
1604 lr.push(fromfile)
1675 lr.push(fromfile)
1605 yield 'file', header
1676 yield 'file', header
1606 elif line[0] == ' ':
1677 elif line[0] == ' ':
1607 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1678 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1608 elif line[0] in '-+':
1679 elif line[0] in '-+':
1609 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1680 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1610 else:
1681 else:
1611 m = lines_re.match(line)
1682 m = lines_re.match(line)
1612 if m:
1683 if m:
1613 yield 'range', m.groups()
1684 yield 'range', m.groups()
1614 else:
1685 else:
1615 yield 'other', line
1686 yield 'other', line
1616
1687
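# Example of the event stream produced above (editor's illustration, not
# from the original source). For the input
#     diff --git a/f b/f
#     --- a/f
#     +++ b/f
#     @@ -1,2 +1,2 @@
#      unchanged
#     -old
#     +new
# scanpatch() yields, in order:
#     ('file',    ['diff --git a/f b/f\n', '--- a/f\n', '+++ b/f\n'])
#     ('range',   ('1', '2', '1', '2', ''))
#     ('context', [' unchanged\n'])
#     ('hunk',    ['-old\n', '+new\n'])
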
1617 def scangitpatch(lr, firstline):
1688 def scangitpatch(lr, firstline):
1618 """
1689 """
1619 Git patches can emit:
1690 Git patches can emit:
1620 - rename a to b
1691 - rename a to b
1621 - change b
1692 - change b
1622 - copy a to c
1693 - copy a to c
1623 - change c
1694 - change c
1624
1695
1625 We cannot apply this sequence as-is: the renamed 'a' could not be
1696 We cannot apply this sequence as-is: the renamed 'a' could not be
1626 found, since it would already have been renamed. And we cannot copy
1697 found, since it would already have been renamed. And we cannot copy
1627 from 'b' instead, because 'b' would already have been changed. So
1698 from 'b' instead, because 'b' would already have been changed. So
1628 we scan the git patch for copy and rename commands so we can
1699 we scan the git patch for copy and rename commands so we can
1629 perform the copies ahead of time.
1700 perform the copies ahead of time.
1630 """
1701 """
1631 pos = 0
1702 pos = 0
1632 try:
1703 try:
1633 pos = lr.fp.tell()
1704 pos = lr.fp.tell()
1634 fp = lr.fp
1705 fp = lr.fp
1635 except IOError:
1706 except IOError:
1636 fp = cStringIO.StringIO(lr.fp.read())
1707 fp = cStringIO.StringIO(lr.fp.read())
1637 gitlr = linereader(fp)
1708 gitlr = linereader(fp)
1638 gitlr.push(firstline)
1709 gitlr.push(firstline)
1639 gitpatches = readgitpatch(gitlr)
1710 gitpatches = readgitpatch(gitlr)
1640 fp.seek(pos)
1711 fp.seek(pos)
1641 return gitpatches
1712 return gitpatches
1642
1713
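# Illustration of the ordering problem described in the docstring above
# (editor's note; the patch text is an assumed example, not from the
# original source). A git patch that renames 'a' to 'b' and also copies
# 'a' to 'c' carries metadata such as
#     diff --git a/a b/b
#     rename from a
#     rename to b
#     diff --git a/a b/c
#     copy from a
#     copy to c
# Applying it strictly top to bottom would fail: by the time the copy to
# 'c' is processed, 'a' has already been renamed away. scangitpatch()
# therefore pre-reads all COPY/RENAME records so their sources can be
# stashed before any hunk is applied.
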
1643 def iterhunks(fp):
1714 def iterhunks(fp):
1644 """Read a patch and yield the following events:
1715 """Read a patch and yield the following events:
1645 - ("file", afile, bfile, firsthunk): select a new target file.
1716 - ("file", afile, bfile, firsthunk): select a new target file.
1646 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1717 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1647 "file" event.
1718 "file" event.
1648 - ("git", gitchanges): current diff is in git format, gitchanges
1719 - ("git", gitchanges): current diff is in git format, gitchanges
1649 maps filenames to gitpatch records. Unique event.
1720 maps filenames to gitpatch records. Unique event.
1650 """
1721 """
1651 afile = ""
1722 afile = ""
1652 bfile = ""
1723 bfile = ""
1653 state = None
1724 state = None
1654 hunknum = 0
1725 hunknum = 0
1655 emitfile = newfile = False
1726 emitfile = newfile = False
1656 gitpatches = None
1727 gitpatches = None
1657
1728
1658 # our states
1729 # our states
1659 BFILE = 1
1730 BFILE = 1
1660 context = None
1731 context = None
1661 lr = linereader(fp)
1732 lr = linereader(fp)
1662
1733
1663 while True:
1734 while True:
1664 x = lr.readline()
1735 x = lr.readline()
1665 if not x:
1736 if not x:
1666 break
1737 break
1667 if state == BFILE and (
1738 if state == BFILE and (
1668 (not context and x[0] == '@')
1739 (not context and x[0] == '@')
1669 or (context is not False and x.startswith('***************'))
1740 or (context is not False and x.startswith('***************'))
1670 or x.startswith('GIT binary patch')):
1741 or x.startswith('GIT binary patch')):
1671 gp = None
1742 gp = None
1672 if (gitpatches and
1743 if (gitpatches and
1673 gitpatches[-1].ispatching(afile, bfile)):
1744 gitpatches[-1].ispatching(afile, bfile)):
1674 gp = gitpatches.pop()
1745 gp = gitpatches.pop()
1675 if x.startswith('GIT binary patch'):
1746 if x.startswith('GIT binary patch'):
1676 h = binhunk(lr, gp.path)
1747 h = binhunk(lr, gp.path)
1677 else:
1748 else:
1678 if context is None and x.startswith('***************'):
1749 if context is None and x.startswith('***************'):
1679 context = True
1750 context = True
1680 h = hunk(x, hunknum + 1, lr, context)
1751 h = hunk(x, hunknum + 1, lr, context)
1681 hunknum += 1
1752 hunknum += 1
1682 if emitfile:
1753 if emitfile:
1683 emitfile = False
1754 emitfile = False
1684 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1755 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1685 yield 'hunk', h
1756 yield 'hunk', h
1686 elif x.startswith('diff --git a/'):
1757 elif x.startswith('diff --git a/'):
1687 m = gitre.match(x.rstrip(' \r\n'))
1758 m = gitre.match(x.rstrip(' \r\n'))
1688 if not m:
1759 if not m:
1689 continue
1760 continue
1690 if gitpatches is None:
1761 if gitpatches is None:
1691 # scan whole input for git metadata
1762 # scan whole input for git metadata
1692 gitpatches = scangitpatch(lr, x)
1763 gitpatches = scangitpatch(lr, x)
1693 yield 'git', [g.copy() for g in gitpatches
1764 yield 'git', [g.copy() for g in gitpatches
1694 if g.op in ('COPY', 'RENAME')]
1765 if g.op in ('COPY', 'RENAME')]
1695 gitpatches.reverse()
1766 gitpatches.reverse()
1696 afile = 'a/' + m.group(1)
1767 afile = 'a/' + m.group(1)
1697 bfile = 'b/' + m.group(2)
1768 bfile = 'b/' + m.group(2)
1698 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1769 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1699 gp = gitpatches.pop()
1770 gp = gitpatches.pop()
1700 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1771 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1701 if not gitpatches:
1772 if not gitpatches:
1702 raise PatchError(_('failed to synchronize metadata for "%s"')
1773 raise PatchError(_('failed to synchronize metadata for "%s"')
1703 % afile[2:])
1774 % afile[2:])
1704 gp = gitpatches[-1]
1775 gp = gitpatches[-1]
1705 newfile = True
1776 newfile = True
1706 elif x.startswith('---'):
1777 elif x.startswith('---'):
1707 # check for a unified diff
1778 # check for a unified diff
1708 l2 = lr.readline()
1779 l2 = lr.readline()
1709 if not l2.startswith('+++'):
1780 if not l2.startswith('+++'):
1710 lr.push(l2)
1781 lr.push(l2)
1711 continue
1782 continue
1712 newfile = True
1783 newfile = True
1713 context = False
1784 context = False
1714 afile = parsefilename(x)
1785 afile = parsefilename(x)
1715 bfile = parsefilename(l2)
1786 bfile = parsefilename(l2)
1716 elif x.startswith('***'):
1787 elif x.startswith('***'):
1717 # check for a context diff
1788 # check for a context diff
1718 l2 = lr.readline()
1789 l2 = lr.readline()
1719 if not l2.startswith('---'):
1790 if not l2.startswith('---'):
1720 lr.push(l2)
1791 lr.push(l2)
1721 continue
1792 continue
1722 l3 = lr.readline()
1793 l3 = lr.readline()
1723 lr.push(l3)
1794 lr.push(l3)
1724 if not l3.startswith("***************"):
1795 if not l3.startswith("***************"):
1725 lr.push(l2)
1796 lr.push(l2)
1726 continue
1797 continue
1727 newfile = True
1798 newfile = True
1728 context = True
1799 context = True
1729 afile = parsefilename(x)
1800 afile = parsefilename(x)
1730 bfile = parsefilename(l2)
1801 bfile = parsefilename(l2)
1731
1802
1732 if newfile:
1803 if newfile:
1733 newfile = False
1804 newfile = False
1734 emitfile = True
1805 emitfile = True
1735 state = BFILE
1806 state = BFILE
1736 hunknum = 0
1807 hunknum = 0
1737
1808
1738 while gitpatches:
1809 while gitpatches:
1739 gp = gitpatches.pop()
1810 gp = gitpatches.pop()
1740 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1811 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1741
1812
1742 def applybindelta(binchunk, data):
1813 def applybindelta(binchunk, data):
1743 """Apply a binary delta hunk
1814 """Apply a binary delta hunk
1744 The algorithm used is the algorithm from git's patch-delta.c
1815 The algorithm used is the algorithm from git's patch-delta.c
1745 """
1816 """
1746 def deltahead(binchunk):
1817 def deltahead(binchunk):
1747 i = 0
1818 i = 0
1748 for c in binchunk:
1819 for c in binchunk:
1749 i += 1
1820 i += 1
1750 if not (ord(c) & 0x80):
1821 if not (ord(c) & 0x80):
1751 return i
1822 return i
1752 return i
1823 return i
1753 out = ""
1824 out = ""
1754 s = deltahead(binchunk)
1825 s = deltahead(binchunk)
1755 binchunk = binchunk[s:]
1826 binchunk = binchunk[s:]
1756 s = deltahead(binchunk)
1827 s = deltahead(binchunk)
1757 binchunk = binchunk[s:]
1828 binchunk = binchunk[s:]
1758 i = 0
1829 i = 0
1759 while i < len(binchunk):
1830 while i < len(binchunk):
1760 cmd = ord(binchunk[i])
1831 cmd = ord(binchunk[i])
1761 i += 1
1832 i += 1
1762 if (cmd & 0x80):
1833 if (cmd & 0x80):
1763 offset = 0
1834 offset = 0
1764 size = 0
1835 size = 0
1765 if (cmd & 0x01):
1836 if (cmd & 0x01):
1766 offset = ord(binchunk[i])
1837 offset = ord(binchunk[i])
1767 i += 1
1838 i += 1
1768 if (cmd & 0x02):
1839 if (cmd & 0x02):
1769 offset |= ord(binchunk[i]) << 8
1840 offset |= ord(binchunk[i]) << 8
1770 i += 1
1841 i += 1
1771 if (cmd & 0x04):
1842 if (cmd & 0x04):
1772 offset |= ord(binchunk[i]) << 16
1843 offset |= ord(binchunk[i]) << 16
1773 i += 1
1844 i += 1
1774 if (cmd & 0x08):
1845 if (cmd & 0x08):
1775 offset |= ord(binchunk[i]) << 24
1846 offset |= ord(binchunk[i]) << 24
1776 i += 1
1847 i += 1
1777 if (cmd & 0x10):
1848 if (cmd & 0x10):
1778 size = ord(binchunk[i])
1849 size = ord(binchunk[i])
1779 i += 1
1850 i += 1
1780 if (cmd & 0x20):
1851 if (cmd & 0x20):
1781 size |= ord(binchunk[i]) << 8
1852 size |= ord(binchunk[i]) << 8
1782 i += 1
1853 i += 1
1783 if (cmd & 0x40):
1854 if (cmd & 0x40):
1784 size |= ord(binchunk[i]) << 16
1855 size |= ord(binchunk[i]) << 16
1785 i += 1
1856 i += 1
1786 if size == 0:
1857 if size == 0:
1787 size = 0x10000
1858 size = 0x10000
1788 offset_end = offset + size
1859 offset_end = offset + size
1789 out += data[offset:offset_end]
1860 out += data[offset:offset_end]
1790 elif cmd != 0:
1861 elif cmd != 0:
1791 offset_end = i + cmd
1862 offset_end = i + cmd
1792 out += binchunk[i:offset_end]
1863 out += binchunk[i:offset_end]
1793 i += cmd
1864 i += cmd
1794 else:
1865 else:
1795 raise PatchError(_('unexpected delta opcode 0'))
1866 raise PatchError(_('unexpected delta opcode 0'))
1796 return out
1867 return out
1797
1868
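# Worked example for applybindelta() above (editor's illustration; the byte
# string is an assumed input, not from the original source):
#     applybindelta('\x06\x05\x91\x02\x03\x02XY', 'abcdef') == 'cdeXY'
# The first two bytes are the varint-encoded source and target sizes (6 and
# 5); 0x91 is a copy opcode whose 0x01/0x10 bits announce a one-byte offset
# (0x02) and a one-byte size (0x03), copying 'cde' from the source; the
# final opcode 0x02 inserts the two literal bytes 'XY'.
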
1798 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1869 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1799 """Reads a patch from fp and tries to apply it.
1870 """Reads a patch from fp and tries to apply it.
1800
1871
1801 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1872 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1802 there was any fuzz.
1873 there was any fuzz.
1803
1874
1804 If 'eolmode' is 'strict', the patch content and patched file are
1875 If 'eolmode' is 'strict', the patch content and patched file are
1805 read in binary mode. Otherwise, line endings are ignored when
1876 read in binary mode. Otherwise, line endings are ignored when
1806 patching and then normalized according to 'eolmode'.
1877 patching and then normalized according to 'eolmode'.
1807 """
1878 """
1808 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1879 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1809 prefix=prefix, eolmode=eolmode)
1880 prefix=prefix, eolmode=eolmode)
1810
1881
1811 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1882 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1812 eolmode='strict'):
1883 eolmode='strict'):
1813
1884
1814 if prefix:
1885 if prefix:
1815 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1886 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1816 prefix)
1887 prefix)
1817 if prefix != '':
1888 if prefix != '':
1818 prefix += '/'
1889 prefix += '/'
1819 def pstrip(p):
1890 def pstrip(p):
1820 return pathtransform(p, strip - 1, prefix)[1]
1891 return pathtransform(p, strip - 1, prefix)[1]
1821
1892
1822 rejects = 0
1893 rejects = 0
1823 err = 0
1894 err = 0
1824 current_file = None
1895 current_file = None
1825
1896
1826 for state, values in iterhunks(fp):
1897 for state, values in iterhunks(fp):
1827 if state == 'hunk':
1898 if state == 'hunk':
1828 if not current_file:
1899 if not current_file:
1829 continue
1900 continue
1830 ret = current_file.apply(values)
1901 ret = current_file.apply(values)
1831 if ret > 0:
1902 if ret > 0:
1832 err = 1
1903 err = 1
1833 elif state == 'file':
1904 elif state == 'file':
1834 if current_file:
1905 if current_file:
1835 rejects += current_file.close()
1906 rejects += current_file.close()
1836 current_file = None
1907 current_file = None
1837 afile, bfile, first_hunk, gp = values
1908 afile, bfile, first_hunk, gp = values
1838 if gp:
1909 if gp:
1839 gp.path = pstrip(gp.path)
1910 gp.path = pstrip(gp.path)
1840 if gp.oldpath:
1911 if gp.oldpath:
1841 gp.oldpath = pstrip(gp.oldpath)
1912 gp.oldpath = pstrip(gp.oldpath)
1842 else:
1913 else:
1843 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1914 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1844 prefix)
1915 prefix)
1845 if gp.op == 'RENAME':
1916 if gp.op == 'RENAME':
1846 backend.unlink(gp.oldpath)
1917 backend.unlink(gp.oldpath)
1847 if not first_hunk:
1918 if not first_hunk:
1848 if gp.op == 'DELETE':
1919 if gp.op == 'DELETE':
1849 backend.unlink(gp.path)
1920 backend.unlink(gp.path)
1850 continue
1921 continue
1851 data, mode = None, None
1922 data, mode = None, None
1852 if gp.op in ('RENAME', 'COPY'):
1923 if gp.op in ('RENAME', 'COPY'):
1853 data, mode = store.getfile(gp.oldpath)[:2]
1924 data, mode = store.getfile(gp.oldpath)[:2]
1854 # FIXME: failing getfile has never been handled here
1925 # FIXME: failing getfile has never been handled here
1855 assert data is not None
1926 assert data is not None
1856 if gp.mode:
1927 if gp.mode:
1857 mode = gp.mode
1928 mode = gp.mode
1858 if gp.op == 'ADD':
1929 if gp.op == 'ADD':
1859 # Added files without content have no hunk and
1930 # Added files without content have no hunk and
1860 # must be created
1931 # must be created
1861 data = ''
1932 data = ''
1862 if data or mode:
1933 if data or mode:
1863 if (gp.op in ('ADD', 'RENAME', 'COPY')
1934 if (gp.op in ('ADD', 'RENAME', 'COPY')
1864 and backend.exists(gp.path)):
1935 and backend.exists(gp.path)):
1865 raise PatchError(_("cannot create %s: destination "
1936 raise PatchError(_("cannot create %s: destination "
1866 "already exists") % gp.path)
1937 "already exists") % gp.path)
1867 backend.setfile(gp.path, data, mode, gp.oldpath)
1938 backend.setfile(gp.path, data, mode, gp.oldpath)
1868 continue
1939 continue
1869 try:
1940 try:
1870 current_file = patcher(ui, gp, backend, store,
1941 current_file = patcher(ui, gp, backend, store,
1871 eolmode=eolmode)
1942 eolmode=eolmode)
1872 except PatchError, inst:
1943 except PatchError, inst:
1873 ui.warn(str(inst) + '\n')
1944 ui.warn(str(inst) + '\n')
1874 current_file = None
1945 current_file = None
1875 rejects += 1
1946 rejects += 1
1876 continue
1947 continue
1877 elif state == 'git':
1948 elif state == 'git':
1878 for gp in values:
1949 for gp in values:
1879 path = pstrip(gp.oldpath)
1950 path = pstrip(gp.oldpath)
1880 data, mode = backend.getfile(path)
1951 data, mode = backend.getfile(path)
1881 if data is None:
1952 if data is None:
1882 # The error ignored here will trigger a getfile()
1953 # The error ignored here will trigger a getfile()
1883 # error in a place more appropriate for error
1954 # error in a place more appropriate for error
1884 # handling, and will not interrupt the patching
1955 # handling, and will not interrupt the patching
1885 # process.
1956 # process.
1886 pass
1957 pass
1887 else:
1958 else:
1888 store.setfile(path, data, mode)
1959 store.setfile(path, data, mode)
1889 else:
1960 else:
1890 raise util.Abort(_('unsupported parser state: %s') % state)
1961 raise util.Abort(_('unsupported parser state: %s') % state)
1891
1962
1892 if current_file:
1963 if current_file:
1893 rejects += current_file.close()
1964 rejects += current_file.close()
1894
1965
1895 if rejects:
1966 if rejects:
1896 return -1
1967 return -1
1897 return err
1968 return err
1898
1969
1899 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1970 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1900 similarity):
1971 similarity):
1901 """use <patcher> to apply <patchname> to the working directory.
1972 """use <patcher> to apply <patchname> to the working directory.
1902 returns whether patch was applied with fuzz factor."""
1973 returns whether patch was applied with fuzz factor."""
1903
1974
1904 fuzz = False
1975 fuzz = False
1905 args = []
1976 args = []
1906 cwd = repo.root
1977 cwd = repo.root
1907 if cwd:
1978 if cwd:
1908 args.append('-d %s' % util.shellquote(cwd))
1979 args.append('-d %s' % util.shellquote(cwd))
1909 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1980 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1910 util.shellquote(patchname)))
1981 util.shellquote(patchname)))
1911 try:
1982 try:
1912 for line in fp:
1983 for line in fp:
1913 line = line.rstrip()
1984 line = line.rstrip()
1914 ui.note(line + '\n')
1985 ui.note(line + '\n')
1915 if line.startswith('patching file '):
1986 if line.startswith('patching file '):
1916 pf = util.parsepatchoutput(line)
1987 pf = util.parsepatchoutput(line)
1917 printed_file = False
1988 printed_file = False
1918 files.add(pf)
1989 files.add(pf)
1919 elif line.find('with fuzz') >= 0:
1990 elif line.find('with fuzz') >= 0:
1920 fuzz = True
1991 fuzz = True
1921 if not printed_file:
1992 if not printed_file:
1922 ui.warn(pf + '\n')
1993 ui.warn(pf + '\n')
1923 printed_file = True
1994 printed_file = True
1924 ui.warn(line + '\n')
1995 ui.warn(line + '\n')
1925 elif line.find('saving rejects to file') >= 0:
1996 elif line.find('saving rejects to file') >= 0:
1926 ui.warn(line + '\n')
1997 ui.warn(line + '\n')
1927 elif line.find('FAILED') >= 0:
1998 elif line.find('FAILED') >= 0:
1928 if not printed_file:
1999 if not printed_file:
1929 ui.warn(pf + '\n')
2000 ui.warn(pf + '\n')
1930 printed_file = True
2001 printed_file = True
1931 ui.warn(line + '\n')
2002 ui.warn(line + '\n')
1932 finally:
2003 finally:
1933 if files:
2004 if files:
1934 scmutil.marktouched(repo, files, similarity)
2005 scmutil.marktouched(repo, files, similarity)
1935 code = fp.close()
2006 code = fp.close()
1936 if code:
2007 if code:
1937 raise PatchError(_("patch command failed: %s") %
2008 raise PatchError(_("patch command failed: %s") %
1938 util.explainexit(code)[0])
2009 util.explainexit(code)[0])
1939 return fuzz
2010 return fuzz
1940
2011
1941 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2012 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1942 eolmode='strict'):
2013 eolmode='strict'):
1943 if files is None:
2014 if files is None:
1944 files = set()
2015 files = set()
1945 if eolmode is None:
2016 if eolmode is None:
1946 eolmode = ui.config('patch', 'eol', 'strict')
2017 eolmode = ui.config('patch', 'eol', 'strict')
1947 if eolmode.lower() not in eolmodes:
2018 if eolmode.lower() not in eolmodes:
1948 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
2019 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1949 eolmode = eolmode.lower()
2020 eolmode = eolmode.lower()
1950
2021
1951 store = filestore()
2022 store = filestore()
1952 try:
2023 try:
1953 fp = open(patchobj, 'rb')
2024 fp = open(patchobj, 'rb')
1954 except TypeError:
2025 except TypeError:
1955 fp = patchobj
2026 fp = patchobj
1956 try:
2027 try:
1957 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2028 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1958 eolmode=eolmode)
2029 eolmode=eolmode)
1959 finally:
2030 finally:
1960 if fp != patchobj:
2031 if fp != patchobj:
1961 fp.close()
2032 fp.close()
1962 files.update(backend.close())
2033 files.update(backend.close())
1963 store.close()
2034 store.close()
1964 if ret < 0:
2035 if ret < 0:
1965 raise PatchError(_('patch failed to apply'))
2036 raise PatchError(_('patch failed to apply'))
1966 return ret > 0
2037 return ret > 0
1967
2038
1968 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2039 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1969 eolmode='strict', similarity=0):
2040 eolmode='strict', similarity=0):
1970 """use builtin patch to apply <patchobj> to the working directory.
2041 """use builtin patch to apply <patchobj> to the working directory.
1971 returns whether patch was applied with fuzz factor."""
2042 returns whether patch was applied with fuzz factor."""
1972 backend = workingbackend(ui, repo, similarity)
2043 backend = workingbackend(ui, repo, similarity)
1973 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2044 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1974
2045
1975 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2046 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1976 eolmode='strict'):
2047 eolmode='strict'):
1977 backend = repobackend(ui, repo, ctx, store)
2048 backend = repobackend(ui, repo, ctx, store)
1978 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2049 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1979
2050
1980 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2051 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1981 similarity=0):
2052 similarity=0):
1982 """Apply <patchname> to the working directory.
2053 """Apply <patchname> to the working directory.
1983
2054
1984 'eolmode' specifies how end of lines should be handled. It can be:
2055 'eolmode' specifies how end of lines should be handled. It can be:
1985 - 'strict': inputs are read in binary mode, EOLs are preserved
2056 - 'strict': inputs are read in binary mode, EOLs are preserved
1986 - 'crlf': EOLs are ignored when patching and reset to CRLF
2057 - 'crlf': EOLs are ignored when patching and reset to CRLF
1987 - 'lf': EOLs are ignored when patching and reset to LF
2058 - 'lf': EOLs are ignored when patching and reset to LF
1988 - None: get it from user settings, default to 'strict'
2059 - None: get it from user settings, default to 'strict'
1989 'eolmode' is ignored when using an external patcher program.
2060 'eolmode' is ignored when using an external patcher program.
1990
2061
1991 Returns whether patch was applied with fuzz factor.
2062 Returns whether patch was applied with fuzz factor.
1992 """
2063 """
1993 patcher = ui.config('ui', 'patch')
2064 patcher = ui.config('ui', 'patch')
1994 if files is None:
2065 if files is None:
1995 files = set()
2066 files = set()
1996 if patcher:
2067 if patcher:
1997 return _externalpatch(ui, repo, patcher, patchname, strip,
2068 return _externalpatch(ui, repo, patcher, patchname, strip,
1998 files, similarity)
2069 files, similarity)
1999 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2070 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2000 similarity)
2071 similarity)
2001
2072
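# Typical use of patch() above (editor's sketch; the variable names are
# assumed, not from the original source):
#     files = set()
#     fuzz = patch(ui, repo, 'change.diff', strip=1, files=files,
#                  eolmode=None)
# With eolmode=None the value of the [patch] eol configuration decides how
# line endings are handled, and configuring ui.patch to an external program
# makes this function delegate to _externalpatch() instead of the builtin
# patcher.
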
2002 def changedfiles(ui, repo, patchpath, strip=1):
2073 def changedfiles(ui, repo, patchpath, strip=1):
2003 backend = fsbackend(ui, repo.root)
2074 backend = fsbackend(ui, repo.root)
2004 fp = open(patchpath, 'rb')
2075 fp = open(patchpath, 'rb')
2005 try:
2076 try:
2006 changed = set()
2077 changed = set()
2007 for state, values in iterhunks(fp):
2078 for state, values in iterhunks(fp):
2008 if state == 'file':
2079 if state == 'file':
2009 afile, bfile, first_hunk, gp = values
2080 afile, bfile, first_hunk, gp = values
2010 if gp:
2081 if gp:
2011 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2082 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2012 if gp.oldpath:
2083 if gp.oldpath:
2013 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2084 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2014 else:
2085 else:
2015 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2086 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2016 '')
2087 '')
2017 changed.add(gp.path)
2088 changed.add(gp.path)
2018 if gp.op == 'RENAME':
2089 if gp.op == 'RENAME':
2019 changed.add(gp.oldpath)
2090 changed.add(gp.oldpath)
2020 elif state not in ('hunk', 'git'):
2091 elif state not in ('hunk', 'git'):
2021 raise util.Abort(_('unsupported parser state: %s') % state)
2092 raise util.Abort(_('unsupported parser state: %s') % state)
2022 return changed
2093 return changed
2023 finally:
2094 finally:
2024 fp.close()
2095 fp.close()
2025
2096
2026 class GitDiffRequired(Exception):
2097 class GitDiffRequired(Exception):
2027 pass
2098 pass
2028
2099
2029 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2100 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2030 '''return diffopts with all features supported and parsed'''
2101 '''return diffopts with all features supported and parsed'''
2031 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2102 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2032 git=True, whitespace=True, formatchanging=True)
2103 git=True, whitespace=True, formatchanging=True)
2033
2104
2034 diffopts = diffallopts
2105 diffopts = diffallopts
2035
2106
2036 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2107 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2037 whitespace=False, formatchanging=False):
2108 whitespace=False, formatchanging=False):
2038 '''return diffopts with only opted-in features parsed
2109 '''return diffopts with only opted-in features parsed
2039
2110
2040 Features:
2111 Features:
2041 - git: git-style diffs
2112 - git: git-style diffs
2042 - whitespace: whitespace options like ignoreblanklines and ignorews
2113 - whitespace: whitespace options like ignoreblanklines and ignorews
2043 - formatchanging: options that will likely break or cause correctness issues
2114 - formatchanging: options that will likely break or cause correctness issues
2044 with most diff parsers
2115 with most diff parsers
2045 '''
2116 '''
2046 def get(key, name=None, getter=ui.configbool, forceplain=None):
2117 def get(key, name=None, getter=ui.configbool, forceplain=None):
2047 if opts:
2118 if opts:
2048 v = opts.get(key)
2119 v = opts.get(key)
2049 if v:
2120 if v:
2050 return v
2121 return v
2051 if forceplain is not None and ui.plain():
2122 if forceplain is not None and ui.plain():
2052 return forceplain
2123 return forceplain
2053 return getter(section, name or key, None, untrusted=untrusted)
2124 return getter(section, name or key, None, untrusted=untrusted)
2054
2125
2055 # core options, expected to be understood by every diff parser
2126 # core options, expected to be understood by every diff parser
2056 buildopts = {
2127 buildopts = {
2057 'nodates': get('nodates'),
2128 'nodates': get('nodates'),
2058 'showfunc': get('show_function', 'showfunc'),
2129 'showfunc': get('show_function', 'showfunc'),
2059 'context': get('unified', getter=ui.config),
2130 'context': get('unified', getter=ui.config),
2060 }
2131 }
2061
2132
2062 if git:
2133 if git:
2063 buildopts['git'] = get('git')
2134 buildopts['git'] = get('git')
2064 if whitespace:
2135 if whitespace:
2065 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2136 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2066 buildopts['ignorewsamount'] = get('ignore_space_change',
2137 buildopts['ignorewsamount'] = get('ignore_space_change',
2067 'ignorewsamount')
2138 'ignorewsamount')
2068 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2139 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2069 'ignoreblanklines')
2140 'ignoreblanklines')
2070 if formatchanging:
2141 if formatchanging:
2071 buildopts['text'] = opts and opts.get('text')
2142 buildopts['text'] = opts and opts.get('text')
2072 buildopts['nobinary'] = get('nobinary')
2143 buildopts['nobinary'] = get('nobinary')
2073 buildopts['noprefix'] = get('noprefix', forceplain=False)
2144 buildopts['noprefix'] = get('noprefix', forceplain=False)
2074
2145
2075 return mdiff.diffopts(**buildopts)
2146 return mdiff.diffopts(**buildopts)
2076
2147
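# Example call for difffeatureopts() above (editor's illustration; 'cmdopts'
# is an assumed command-options dict, not from the original source):
#     opts = difffeatureopts(ui, opts=cmdopts, git=True)
# Only the core options plus the git feature are read here; the whitespace
# and format-changing groups keep their defaults because those features
# were not opted into.
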
2077 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2148 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2078 losedatafn=None, prefix='', relroot=''):
2149 losedatafn=None, prefix='', relroot=''):
2079 '''yields diff of changes to files between two nodes, or node and
2150 '''yields diff of changes to files between two nodes, or node and
2080 working directory.
2151 working directory.
2081
2152
2082 if node1 is None, use first dirstate parent instead.
2153 if node1 is None, use first dirstate parent instead.
2083 if node2 is None, compare node1 with working directory.
2154 if node2 is None, compare node1 with working directory.
2084
2155
2085 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2156 losedatafn(**kwargs) is a callable run when opts.upgrade=True and
2086 every time some change cannot be represented with the current
2157 every time some change cannot be represented with the current
2087 patch format. Return False to upgrade to git patch format, True to
2158 patch format. Return False to upgrade to git patch format, True to
2088 accept the loss or raise an exception to abort the diff. It is
2159 accept the loss or raise an exception to abort the diff. It is
2089 called with the name of the current file being diffed as 'fn'. If set
2160 called with the name of the current file being diffed as 'fn'. If set
2090 to None, patches will always be upgraded to git format when
2161 to None, patches will always be upgraded to git format when
2091 necessary.
2162 necessary.
2092
2163
2093 prefix is a filename prefix that is prepended to all filenames on
2164 prefix is a filename prefix that is prepended to all filenames on
2094 display (used for subrepos).
2165 display (used for subrepos).
2095
2166
2096 relroot, if not empty, must be normalized with a trailing /. Any match
2167 relroot, if not empty, must be normalized with a trailing /. Any match
2097 patterns that fall outside it will be ignored.'''
2168 patterns that fall outside it will be ignored.'''
2098
2169
2099 if opts is None:
2170 if opts is None:
2100 opts = mdiff.defaultopts
2171 opts = mdiff.defaultopts
2101
2172
2102 if not node1 and not node2:
2173 if not node1 and not node2:
2103 node1 = repo.dirstate.p1()
2174 node1 = repo.dirstate.p1()
2104
2175
2105 def lrugetfilectx():
2176 def lrugetfilectx():
2106 cache = {}
2177 cache = {}
2107 order = collections.deque()
2178 order = collections.deque()
2108 def getfilectx(f, ctx):
2179 def getfilectx(f, ctx):
2109 fctx = ctx.filectx(f, filelog=cache.get(f))
2180 fctx = ctx.filectx(f, filelog=cache.get(f))
2110 if f not in cache:
2181 if f not in cache:
2111 if len(cache) > 20:
2182 if len(cache) > 20:
2112 del cache[order.popleft()]
2183 del cache[order.popleft()]
2113 cache[f] = fctx.filelog()
2184 cache[f] = fctx.filelog()
2114 else:
2185 else:
2115 order.remove(f)
2186 order.remove(f)
2116 order.append(f)
2187 order.append(f)
2117 return fctx
2188 return fctx
2118 return getfilectx
2189 return getfilectx
2119 getfilectx = lrugetfilectx()
2190 getfilectx = lrugetfilectx()
2120
2191
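lrugetfilectx() above is a small least-recently-used cache of filelogs keyed by filename. A self-contained sketch of the same eviction scheme, decoupled from repository objects (cache size and helper names are arbitrary):

import collections

def make_cache(maxsize=20):
    cache = {}
    order = collections.deque()
    def get(key, compute):
        if key not in cache:
            if len(cache) >= maxsize:
                del cache[order.popleft()]   # evict the least recently used key
            cache[key] = compute(key)
        else:
            order.remove(key)                # refresh position on reuse
        order.append(key)
        return cache[key]
    return get

get = make_cache(2)
for k in ['a', 'b', 'a', 'c']:               # 'b' is evicted when 'c' arrives
    get(k, str.upper)
print(get('a', str.upper))                   # 'A': still cached
print(get('b', lambda k: 'recomputed'))      # 'recomputed': had been dropped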
2121 ctx1 = repo[node1]
2192 ctx1 = repo[node1]
2122 ctx2 = repo[node2]
2193 ctx2 = repo[node2]
2123
2194
2124 relfiltered = False
2195 relfiltered = False
2125 if relroot != '' and match.always():
2196 if relroot != '' and match.always():
2126 # as a special case, create a new matcher with just the relroot
2197 # as a special case, create a new matcher with just the relroot
2127 pats = [relroot]
2198 pats = [relroot]
2128 match = scmutil.match(ctx2, pats, default='path')
2199 match = scmutil.match(ctx2, pats, default='path')
2129 relfiltered = True
2200 relfiltered = True
2130
2201
2131 if not changes:
2202 if not changes:
2132 changes = repo.status(ctx1, ctx2, match=match)
2203 changes = repo.status(ctx1, ctx2, match=match)
2133 modified, added, removed = changes[:3]
2204 modified, added, removed = changes[:3]
2134
2205
2135 if not modified and not added and not removed:
2206 if not modified and not added and not removed:
2136 return []
2207 return []
2137
2208
2138 if repo.ui.debugflag:
2209 if repo.ui.debugflag:
2139 hexfunc = hex
2210 hexfunc = hex
2140 else:
2211 else:
2141 hexfunc = short
2212 hexfunc = short
2142 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2213 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2143
2214
2144 copy = {}
2215 copy = {}
2145 if opts.git or opts.upgrade:
2216 if opts.git or opts.upgrade:
2146 copy = copies.pathcopies(ctx1, ctx2, match=match)
2217 copy = copies.pathcopies(ctx1, ctx2, match=match)
2147
2218
2148 if relroot is not None:
2219 if relroot is not None:
2149 if not relfiltered:
2220 if not relfiltered:
2150 # XXX this would ideally be done in the matcher, but that is
2221 # XXX this would ideally be done in the matcher, but that is
2151 # generally meant to 'or' patterns, not 'and' them. In this case we
2222 # generally meant to 'or' patterns, not 'and' them. In this case we
2152 # need to 'and' all the patterns from the matcher with relroot.
2223 # need to 'and' all the patterns from the matcher with relroot.
2153 def filterrel(l):
2224 def filterrel(l):
2154 return [f for f in l if f.startswith(relroot)]
2225 return [f for f in l if f.startswith(relroot)]
2155 modified = filterrel(modified)
2226 modified = filterrel(modified)
2156 added = filterrel(added)
2227 added = filterrel(added)
2157 removed = filterrel(removed)
2228 removed = filterrel(removed)
2158 relfiltered = True
2229 relfiltered = True
2159 # filter out copies where either side isn't inside the relative root
2230 # filter out copies where either side isn't inside the relative root
2160 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2231 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2161 if dst.startswith(relroot)
2232 if dst.startswith(relroot)
2162 and src.startswith(relroot)))
2233 and src.startswith(relroot)))
2163
2234
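The copy-filtering step above keeps a copy record only when both endpoints live under the relative root; a one-sided copy would otherwise point at a path outside the requested subtree. A tiny illustration with made-up paths:

def filtercopies(copy, relroot):
    return {dst: src for dst, src in copy.items()
            if dst.startswith(relroot) and src.startswith(relroot)}

print(filtercopies({'sub/b': 'sub/a', 'sub/c': 'top/a'}, 'sub/'))
# {'sub/b': 'sub/a'} -- the copy whose source is outside the root is dropped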
2164 def difffn(opts, losedata):
2235 def difffn(opts, losedata):
2165 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2236 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2166 copy, getfilectx, opts, losedata, prefix, relroot)
2237 copy, getfilectx, opts, losedata, prefix, relroot)
2167 if opts.upgrade and not opts.git:
2238 if opts.upgrade and not opts.git:
2168 try:
2239 try:
2169 def losedata(fn):
2240 def losedata(fn):
2170 if not losedatafn or not losedatafn(fn=fn):
2241 if not losedatafn or not losedatafn(fn=fn):
2171 raise GitDiffRequired
2242 raise GitDiffRequired
2172 # Buffer the whole output until we are sure it can be generated
2243 # Buffer the whole output until we are sure it can be generated
2173 return list(difffn(opts.copy(git=False), losedata))
2244 return list(difffn(opts.copy(git=False), losedata))
2174 except GitDiffRequired:
2245 except GitDiffRequired:
2175 return difffn(opts.copy(git=True), None)
2246 return difffn(opts.copy(git=True), None)
2176 else:
2247 else:
2177 return difffn(opts, None)
2248 return difffn(opts, None)
2178
2249
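When opts.upgrade is set without opts.git, diff() buffers the whole plain-format output and regenerates it in git format as soon as losedata() signals that something (a binary file, a flag change, a copy) cannot be represented. A standalone sketch of that buffer-then-retry pattern (the exception class below is local to the sketch, not mercurial.patch.GitDiffRequired):

class GitDiffRequired(Exception):
    pass

def generate(preferred, fallback, items):
    def plain():
        for item in items:
            if item.get('binary'):
                raise GitDiffRequired()   # data would be lost in plain format
            yield preferred(item)
    try:
        return list(plain())              # buffer so nothing is emitted twice
    except GitDiffRequired:
        return [fallback(item) for item in items]

print(generate(lambda i: 'plain:%s' % i['name'],
               lambda i: 'git:%s' % i['name'],
               [{'name': 'a'}, {'name': 'b', 'binary': True}]))
# ['git:a', 'git:b'] -- the whole output is regenerated in the richer format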
2179 def difflabel(func, *args, **kw):
2250 def difflabel(func, *args, **kw):
2180 '''yields 2-tuples of (output, label) based on the output of func()'''
2251 '''yields 2-tuples of (output, label) based on the output of func()'''
2181 headprefixes = [('diff', 'diff.diffline'),
2252 headprefixes = [('diff', 'diff.diffline'),
2182 ('copy', 'diff.extended'),
2253 ('copy', 'diff.extended'),
2183 ('rename', 'diff.extended'),
2254 ('rename', 'diff.extended'),
2184 ('old', 'diff.extended'),
2255 ('old', 'diff.extended'),
2185 ('new', 'diff.extended'),
2256 ('new', 'diff.extended'),
2186 ('deleted', 'diff.extended'),
2257 ('deleted', 'diff.extended'),
2187 ('---', 'diff.file_a'),
2258 ('---', 'diff.file_a'),
2188 ('+++', 'diff.file_b')]
2259 ('+++', 'diff.file_b')]
2189 textprefixes = [('@', 'diff.hunk'),
2260 textprefixes = [('@', 'diff.hunk'),
2190 ('-', 'diff.deleted'),
2261 ('-', 'diff.deleted'),
2191 ('+', 'diff.inserted')]
2262 ('+', 'diff.inserted')]
2192 head = False
2263 head = False
2193 for chunk in func(*args, **kw):
2264 for chunk in func(*args, **kw):
2194 lines = chunk.split('\n')
2265 lines = chunk.split('\n')
2195 for i, line in enumerate(lines):
2266 for i, line in enumerate(lines):
2196 if i != 0:
2267 if i != 0:
2197 yield ('\n', '')
2268 yield ('\n', '')
2198 if head:
2269 if head:
2199 if line.startswith('@'):
2270 if line.startswith('@'):
2200 head = False
2271 head = False
2201 else:
2272 else:
2202 if line and line[0] not in ' +-@\\':
2273 if line and line[0] not in ' +-@\\':
2203 head = True
2274 head = True
2204 stripline = line
2275 stripline = line
2205 diffline = False
2276 diffline = False
2206 if not head and line and line[0] in '+-':
2277 if not head and line and line[0] in '+-':
2207 # highlight tabs and trailing whitespace, but only in
2278 # highlight tabs and trailing whitespace, but only in
2208 # changed lines
2279 # changed lines
2209 stripline = line.rstrip()
2280 stripline = line.rstrip()
2210 diffline = True
2281 diffline = True
2211
2282
2212 prefixes = textprefixes
2283 prefixes = textprefixes
2213 if head:
2284 if head:
2214 prefixes = headprefixes
2285 prefixes = headprefixes
2215 for prefix, label in prefixes:
2286 for prefix, label in prefixes:
2216 if stripline.startswith(prefix):
2287 if stripline.startswith(prefix):
2217 if diffline:
2288 if diffline:
2218 for token in tabsplitter.findall(stripline):
2289 for token in tabsplitter.findall(stripline):
2219 if '\t' == token[0]:
2290 if '\t' == token[0]:
2220 yield (token, 'diff.tab')
2291 yield (token, 'diff.tab')
2221 else:
2292 else:
2222 yield (token, label)
2293 yield (token, label)
2223 else:
2294 else:
2224 yield (stripline, label)
2295 yield (stripline, label)
2225 break
2296 break
2226 else:
2297 else:
2227 yield (line, '')
2298 yield (line, '')
2228 if line != stripline:
2299 if line != stripline:
2229 yield (line[len(stripline):], 'diff.trailingwhitespace')
2300 yield (line[len(stripline):], 'diff.trailingwhitespace')
2230
2301
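difflabel() walks every output line and picks a ui label from its prefix, using one prefix table for header lines and another for hunk body lines, plus extra handling for tabs and trailing whitespace. A reduced sketch of the prefix lookup alone (single table, no header/body distinction):

def labelline(line):
    for prefix, label in (('diff', 'diff.diffline'), ('---', 'diff.file_a'),
                          ('+++', 'diff.file_b'), ('@', 'diff.hunk'),
                          ('-', 'diff.deleted'), ('+', 'diff.inserted')):
        if line.startswith(prefix):
            return label
    return ''

for l in ('diff --git a/f b/f', '@@ -1,2 +1,2 @@', '-old', '+new', ' context'):
    print(labelline(l), repr(l))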
2231 def diffui(*args, **kw):
2302 def diffui(*args, **kw):
2232 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2303 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2233 return difflabel(diff, *args, **kw)
2304 return difflabel(diff, *args, **kw)
2234
2305
2235 def _filepairs(ctx1, modified, added, removed, copy, opts):
2306 def _filepairs(ctx1, modified, added, removed, copy, opts):
2236 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2307 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2237 before and f2 is the name after. For added files, f1 will be None,
2308 before and f2 is the name after. For added files, f1 will be None,
2238 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2309 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2239 or 'rename' (the latter two only if opts.git is set).'''
2310 or 'rename' (the latter two only if opts.git is set).'''
2240 gone = set()
2311 gone = set()
2241
2312
2242 copyto = dict([(v, k) for k, v in copy.items()])
2313 copyto = dict([(v, k) for k, v in copy.items()])
2243
2314
2244 addedset, removedset = set(added), set(removed)
2315 addedset, removedset = set(added), set(removed)
2245 # Fix up added, since merged-in additions appear as
2316 # Fix up added, since merged-in additions appear as
2246 # modifications during merges
2317 # modifications during merges
2247 for f in modified:
2318 for f in modified:
2248 if f not in ctx1:
2319 if f not in ctx1:
2249 addedset.add(f)
2320 addedset.add(f)
2250
2321
2251 for f in sorted(modified + added + removed):
2322 for f in sorted(modified + added + removed):
2252 copyop = None
2323 copyop = None
2253 f1, f2 = f, f
2324 f1, f2 = f, f
2254 if f in addedset:
2325 if f in addedset:
2255 f1 = None
2326 f1 = None
2256 if f in copy:
2327 if f in copy:
2257 if opts.git:
2328 if opts.git:
2258 f1 = copy[f]
2329 f1 = copy[f]
2259 if f1 in removedset and f1 not in gone:
2330 if f1 in removedset and f1 not in gone:
2260 copyop = 'rename'
2331 copyop = 'rename'
2261 gone.add(f1)
2332 gone.add(f1)
2262 else:
2333 else:
2263 copyop = 'copy'
2334 copyop = 'copy'
2264 elif f in removedset:
2335 elif f in removedset:
2265 f2 = None
2336 f2 = None
2266 if opts.git:
2337 if opts.git:
2267 # have we already reported a copy above?
2338 # have we already reported a copy above?
2268 if (f in copyto and copyto[f] in addedset
2339 if (f in copyto and copyto[f] in addedset
2269 and copy[copyto[f]] == f):
2340 and copy[copyto[f]] == f):
2270 continue
2341 continue
2271 yield f1, f2, copyop
2342 yield f1, f2, copyop
2272
2343
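The interesting part of _filepairs() is the rename-versus-copy decision: a copied file is reported as a rename only when its source is also removed in the same change, and each source can be "renamed away" at most once. An isolated sketch of just that rule:

def classify(copies, removed):
    gone, out = set(), {}
    for dst, src in sorted(copies.items()):
        if src in removed and src not in gone:
            out[dst] = 'rename'
            gone.add(src)
        else:
            out[dst] = 'copy'
    return out

print(classify({'b': 'a', 'c': 'a'}, removed={'a'}))
# {'b': 'rename', 'c': 'copy'} -- only the first use of 'a' counts as the rename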
2273 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2344 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2274 copy, getfilectx, opts, losedatafn, prefix, relroot):
2345 copy, getfilectx, opts, losedatafn, prefix, relroot):
2275 '''given input data, generate a diff and yield it in blocks
2346 '''given input data, generate a diff and yield it in blocks
2276
2347
2277 If generating a diff would lose data like flags or binary data and
2348 If generating a diff would lose data like flags or binary data and
2278 losedatafn is not None, it will be called.
2349 losedatafn is not None, it will be called.
2279
2350
2280 relroot is removed and prefix is added to every path in the diff output.
2351 relroot is removed and prefix is added to every path in the diff output.
2281
2352
2282 If relroot is not empty, this function expects every path in modified,
2353 If relroot is not empty, this function expects every path in modified,
2283 added, removed and copy to start with it.'''
2354 added, removed and copy to start with it.'''
2284
2355
2285 def gitindex(text):
2356 def gitindex(text):
2286 if not text:
2357 if not text:
2287 text = ""
2358 text = ""
2288 l = len(text)
2359 l = len(text)
2289 s = util.sha1('blob %d\0' % l)
2360 s = util.sha1('blob %d\0' % l)
2290 s.update(text)
2361 s.update(text)
2291 return s.hexdigest()
2362 return s.hexdigest()
2292
2363
2293 if opts.noprefix:
2364 if opts.noprefix:
2294 aprefix = bprefix = ''
2365 aprefix = bprefix = ''
2295 else:
2366 else:
2296 aprefix = 'a/'
2367 aprefix = 'a/'
2297 bprefix = 'b/'
2368 bprefix = 'b/'
2298
2369
2299 def diffline(f, revs):
2370 def diffline(f, revs):
2300 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2371 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2301 return 'diff %s %s' % (revinfo, f)
2372 return 'diff %s %s' % (revinfo, f)
2302
2373
2303 date1 = util.datestr(ctx1.date())
2374 date1 = util.datestr(ctx1.date())
2304 date2 = util.datestr(ctx2.date())
2375 date2 = util.datestr(ctx2.date())
2305
2376
2306 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2377 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2307
2378
2308 if relroot != '' and (repo.ui.configbool('devel', 'all')
2379 if relroot != '' and (repo.ui.configbool('devel', 'all')
2309 or repo.ui.configbool('devel', 'check-relroot')):
2380 or repo.ui.configbool('devel', 'check-relroot')):
2310 for f in modified + added + removed + copy.keys() + copy.values():
2381 for f in modified + added + removed + copy.keys() + copy.values():
2311 if f is not None and not f.startswith(relroot):
2382 if f is not None and not f.startswith(relroot):
2312 raise AssertionError(
2383 raise AssertionError(
2313 "file %s doesn't start with relroot %s" % (f, relroot))
2384 "file %s doesn't start with relroot %s" % (f, relroot))
2314
2385
2315 for f1, f2, copyop in _filepairs(
2386 for f1, f2, copyop in _filepairs(
2316 ctx1, modified, added, removed, copy, opts):
2387 ctx1, modified, added, removed, copy, opts):
2317 content1 = None
2388 content1 = None
2318 content2 = None
2389 content2 = None
2319 flag1 = None
2390 flag1 = None
2320 flag2 = None
2391 flag2 = None
2321 if f1:
2392 if f1:
2322 content1 = getfilectx(f1, ctx1).data()
2393 content1 = getfilectx(f1, ctx1).data()
2323 if opts.git or losedatafn:
2394 if opts.git or losedatafn:
2324 flag1 = ctx1.flags(f1)
2395 flag1 = ctx1.flags(f1)
2325 if f2:
2396 if f2:
2326 content2 = getfilectx(f2, ctx2).data()
2397 content2 = getfilectx(f2, ctx2).data()
2327 if opts.git or losedatafn:
2398 if opts.git or losedatafn:
2328 flag2 = ctx2.flags(f2)
2399 flag2 = ctx2.flags(f2)
2329 binary = False
2400 binary = False
2330 if opts.git or losedatafn:
2401 if opts.git or losedatafn:
2331 binary = util.binary(content1) or util.binary(content2)
2402 binary = util.binary(content1) or util.binary(content2)
2332
2403
2333 if losedatafn and not opts.git:
2404 if losedatafn and not opts.git:
2334 if (binary or
2405 if (binary or
2335 # copy/rename
2406 # copy/rename
2336 f2 in copy or
2407 f2 in copy or
2337 # empty file creation
2408 # empty file creation
2338 (not f1 and not content2) or
2409 (not f1 and not content2) or
2339 # empty file deletion
2410 # empty file deletion
2340 (not content1 and not f2) or
2411 (not content1 and not f2) or
2341 # create with flags
2412 # create with flags
2342 (not f1 and flag2) or
2413 (not f1 and flag2) or
2343 # change flags
2414 # change flags
2344 (f1 and f2 and flag1 != flag2)):
2415 (f1 and f2 and flag1 != flag2)):
2345 losedatafn(f2 or f1)
2416 losedatafn(f2 or f1)
2346
2417
2347 path1 = f1 or f2
2418 path1 = f1 or f2
2348 path2 = f2 or f1
2419 path2 = f2 or f1
2349 path1 = posixpath.join(prefix, path1[len(relroot):])
2420 path1 = posixpath.join(prefix, path1[len(relroot):])
2350 path2 = posixpath.join(prefix, path2[len(relroot):])
2421 path2 = posixpath.join(prefix, path2[len(relroot):])
2351 header = []
2422 header = []
2352 if opts.git:
2423 if opts.git:
2353 header.append('diff --git %s%s %s%s' %
2424 header.append('diff --git %s%s %s%s' %
2354 (aprefix, path1, bprefix, path2))
2425 (aprefix, path1, bprefix, path2))
2355 if not f1: # added
2426 if not f1: # added
2356 header.append('new file mode %s' % gitmode[flag2])
2427 header.append('new file mode %s' % gitmode[flag2])
2357 elif not f2: # removed
2428 elif not f2: # removed
2358 header.append('deleted file mode %s' % gitmode[flag1])
2429 header.append('deleted file mode %s' % gitmode[flag1])
2359 else: # modified/copied/renamed
2430 else: # modified/copied/renamed
2360 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2431 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2361 if mode1 != mode2:
2432 if mode1 != mode2:
2362 header.append('old mode %s' % mode1)
2433 header.append('old mode %s' % mode1)
2363 header.append('new mode %s' % mode2)
2434 header.append('new mode %s' % mode2)
2364 if copyop is not None:
2435 if copyop is not None:
2365 header.append('%s from %s' % (copyop, path1))
2436 header.append('%s from %s' % (copyop, path1))
2366 header.append('%s to %s' % (copyop, path2))
2437 header.append('%s to %s' % (copyop, path2))
2367 elif revs and not repo.ui.quiet:
2438 elif revs and not repo.ui.quiet:
2368 header.append(diffline(path1, revs))
2439 header.append(diffline(path1, revs))
2369
2440
2370 if binary and opts.git and not opts.nobinary:
2441 if binary and opts.git and not opts.nobinary:
2371 text = mdiff.b85diff(content1, content2)
2442 text = mdiff.b85diff(content1, content2)
2372 if text:
2443 if text:
2373 header.append('index %s..%s' %
2444 header.append('index %s..%s' %
2374 (gitindex(content1), gitindex(content2)))
2445 (gitindex(content1), gitindex(content2)))
2375 else:
2446 else:
2376 text = mdiff.unidiff(content1, date1,
2447 text = mdiff.unidiff(content1, date1,
2377 content2, date2,
2448 content2, date2,
2378 path1, path2, opts=opts)
2449 path1, path2, opts=opts)
2379 if header and (text or len(header) > 1):
2450 if header and (text or len(header) > 1):
2380 yield '\n'.join(header) + '\n'
2451 yield '\n'.join(header) + '\n'
2381 if text:
2452 if text:
2382 yield text
2453 yield text
2383
2454
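The index line emitted for git-style binary diffs hashes each side the way git hashes a blob object: SHA-1 over a "blob <length>\0" header followed by the raw content. A standalone equivalent using hashlib (assumes Python 3 bytes formatting):

import hashlib

def gitindex(text):
    text = text or b""
    s = hashlib.sha1(b"blob %d\0" % len(text))
    s.update(text)
    return s.hexdigest()

# should match `git hash-object` on the same content
print(gitindex(b"hello\n"))   # ce013625030ba8dba906f756967f9e9ca394464a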
2384 def diffstatsum(stats):
2455 def diffstatsum(stats):
2385 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2456 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2386 for f, a, r, b in stats:
2457 for f, a, r, b in stats:
2387 maxfile = max(maxfile, encoding.colwidth(f))
2458 maxfile = max(maxfile, encoding.colwidth(f))
2388 maxtotal = max(maxtotal, a + r)
2459 maxtotal = max(maxtotal, a + r)
2389 addtotal += a
2460 addtotal += a
2390 removetotal += r
2461 removetotal += r
2391 binary = binary or b
2462 binary = binary or b
2392
2463
2393 return maxfile, maxtotal, addtotal, removetotal, binary
2464 return maxfile, maxtotal, addtotal, removetotal, binary
2394
2465
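diffstatsum() reduces the per-file tuples to the widths and totals the renderer needs. A plain-Python sketch of the same aggregation (using len() in place of encoding.colwidth, so wide characters are not measured correctly):

def summarize(stats):
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for name, adds, removes, isbinary in stats:
        maxfile = max(maxfile, len(name))         # widest filename
        maxtotal = max(maxtotal, adds + removes)  # largest per-file change count
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary
    return maxfile, maxtotal, addtotal, removetotal, binary

print(summarize([('a.py', 3, 1, False), ('image.png', 0, 0, True)]))
# (9, 4, 3, 1, True)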
2395 def diffstatdata(lines):
2466 def diffstatdata(lines):
2396 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2467 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2397
2468
2398 results = []
2469 results = []
2399 filename, adds, removes, isbinary = None, 0, 0, False
2470 filename, adds, removes, isbinary = None, 0, 0, False
2400
2471
2401 def addresult():
2472 def addresult():
2402 if filename:
2473 if filename:
2403 results.append((filename, adds, removes, isbinary))
2474 results.append((filename, adds, removes, isbinary))
2404
2475
2405 for line in lines:
2476 for line in lines:
2406 if line.startswith('diff'):
2477 if line.startswith('diff'):
2407 addresult()
2478 addresult()
2408 # set numbers to 0 anyway when starting new file
2479 # set numbers to 0 anyway when starting new file
2409 adds, removes, isbinary = 0, 0, False
2480 adds, removes, isbinary = 0, 0, False
2410 if line.startswith('diff --git a/'):
2481 if line.startswith('diff --git a/'):
2411 filename = gitre.search(line).group(2)
2482 filename = gitre.search(line).group(2)
2412 elif line.startswith('diff -r'):
2483 elif line.startswith('diff -r'):
2413 # format: "diff -r ... -r ... filename"
2484 # format: "diff -r ... -r ... filename"
2414 filename = diffre.search(line).group(1)
2485 filename = diffre.search(line).group(1)
2415 elif line.startswith('+') and not line.startswith('+++ '):
2486 elif line.startswith('+') and not line.startswith('+++ '):
2416 adds += 1
2487 adds += 1
2417 elif line.startswith('-') and not line.startswith('--- '):
2488 elif line.startswith('-') and not line.startswith('--- '):
2418 removes += 1
2489 removes += 1
2419 elif (line.startswith('GIT binary patch') or
2490 elif (line.startswith('GIT binary patch') or
2420 line.startswith('Binary file')):
2491 line.startswith('Binary file')):
2421 isbinary = True
2492 isbinary = True
2422 addresult()
2493 addresult()
2423 return results
2494 return results
2424
2495
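The counting loop in diffstatdata() treats any '+' or '-' line as an insertion or deletion unless it is one of the '+++'/'---' file headers. Reduced to its core:

def countlines(lines):
    adds = removes = 0
    for line in lines:
        if line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
    return adds, removes

print(countlines(['--- a/f', '+++ b/f', '@@ -1 +1,2 @@', '-x', '+y', '+z']))
# (2, 1) -- the file headers are not counted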
2425 def diffstat(lines, width=80, git=False):
2496 def diffstat(lines, width=80, git=False):
2426 output = []
2497 output = []
2427 stats = diffstatdata(lines)
2498 stats = diffstatdata(lines)
2428 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2499 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2429
2500
2430 countwidth = len(str(maxtotal))
2501 countwidth = len(str(maxtotal))
2431 if hasbinary and countwidth < 3:
2502 if hasbinary and countwidth < 3:
2432 countwidth = 3
2503 countwidth = 3
2433 graphwidth = width - countwidth - maxname - 6
2504 graphwidth = width - countwidth - maxname - 6
2434 if graphwidth < 10:
2505 if graphwidth < 10:
2435 graphwidth = 10
2506 graphwidth = 10
2436
2507
2437 def scale(i):
2508 def scale(i):
2438 if maxtotal <= graphwidth:
2509 if maxtotal <= graphwidth:
2439 return i
2510 return i
2440 # If diffstat runs out of room it doesn't print anything,
2511 # If diffstat runs out of room it doesn't print anything,
2441 # which isn't very useful, so always print at least one + or -
2512 # which isn't very useful, so always print at least one + or -
2442 # if there were at least some changes.
2513 # if there were at least some changes.
2443 return max(i * graphwidth // maxtotal, int(bool(i)))
2514 return max(i * graphwidth // maxtotal, int(bool(i)))
2444
2515
2445 for filename, adds, removes, isbinary in stats:
2516 for filename, adds, removes, isbinary in stats:
2446 if isbinary:
2517 if isbinary:
2447 count = 'Bin'
2518 count = 'Bin'
2448 else:
2519 else:
2449 count = adds + removes
2520 count = adds + removes
2450 pluses = '+' * scale(adds)
2521 pluses = '+' * scale(adds)
2451 minuses = '-' * scale(removes)
2522 minuses = '-' * scale(removes)
2452 output.append(' %s%s | %*s %s%s\n' %
2523 output.append(' %s%s | %*s %s%s\n' %
2453 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2524 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2454 countwidth, count, pluses, minuses))
2525 countwidth, count, pluses, minuses))
2455
2526
2456 if stats:
2527 if stats:
2457 output.append(_(' %d files changed, %d insertions(+), '
2528 output.append(_(' %d files changed, %d insertions(+), '
2458 '%d deletions(-)\n')
2529 '%d deletions(-)\n')
2459 % (len(stats), totaladds, totalremoves))
2530 % (len(stats), totaladds, totalremoves))
2460
2531
2461 return ''.join(output)
2532 return ''.join(output)
2462
2533
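The scaling rule in diffstat() shrinks the +/- histogram to the available width but never rounds a non-zero count down to an empty bar. Isolated for illustration:

def scale(count, maxtotal, graphwidth):
    if maxtotal <= graphwidth:
        return count
    # proportional shrink, but keep at least one symbol for any change at all
    return max(count * graphwidth // maxtotal, int(bool(count)))

for c in (0, 1, 250, 1000):
    print(c, '+' * scale(c, maxtotal=1000, graphwidth=40))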
2463 def diffstatui(*args, **kw):
2534 def diffstatui(*args, **kw):
2464 '''like diffstat(), but yields 2-tuples of (output, label) for
2535 '''like diffstat(), but yields 2-tuples of (output, label) for
2465 ui.write()
2536 ui.write()
2466 '''
2537 '''
2467
2538
2468 for line in diffstat(*args, **kw).splitlines():
2539 for line in diffstat(*args, **kw).splitlines():
2469 if line and line[-1] in '+-':
2540 if line and line[-1] in '+-':
2470 name, graph = line.rsplit(' ', 1)
2541 name, graph = line.rsplit(' ', 1)
2471 yield (name + ' ', '')
2542 yield (name + ' ', '')
2472 m = re.search(r'\++', graph)
2543 m = re.search(r'\++', graph)
2473 if m:
2544 if m:
2474 yield (m.group(0), 'diffstat.inserted')
2545 yield (m.group(0), 'diffstat.inserted')
2475 m = re.search(r'-+', graph)
2546 m = re.search(r'-+', graph)
2476 if m:
2547 if m:
2477 yield (m.group(0), 'diffstat.deleted')
2548 yield (m.group(0), 'diffstat.deleted')
2478 else:
2549 else:
2479 yield (line, '')
2550 yield (line, '')
2480 yield ('\n', '')
2551 yield ('\n', '')
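diffstatui() re-labels each finished diffstat row by splitting off the trailing graph and giving the run of '+' and the run of '-' their own labels. A sketch of that split (the sample row is made up):

import re

def labelgraph(line):
    name, graph = line.rsplit(' ', 1)     # separate the histogram column
    parts = [(name + ' ', '')]
    m = re.search(r'\++', graph)
    if m:
        parts.append((m.group(0), 'diffstat.inserted'))
    m = re.search(r'-+', graph)
    if m:
        parts.append((m.group(0), 'diffstat.deleted'))
    return parts

print(labelgraph(' f.py |  3 ++-'))
# [(' f.py |  3 ', ''), ('++', 'diffstat.inserted'), ('-', 'diffstat.deleted')]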
@@ -1,322 +1,393 b''
1 Revert interactive tests
1 Revert interactive tests
2 1 add and commit file f
2 1 add and commit file f
3 2 add and commit file folder1/g
3 2 add and commit file folder1/g
4 3 add and commit file folder2/h
4 3 add and commit file folder2/h
5 4 add and commit file folder1/i
5 4 add and commit file folder1/i
6 5 commit change to file f
6 5 commit change to file f
7 6 commit changes to files folder1/g folder2/h
7 6 commit changes to files folder1/g folder2/h
8 7 commit changes to files folder1/g folder2/h
8 7 commit changes to files folder1/g folder2/h
9 8 revert interactive to commit id 2 (line 3 above), check that folder1/i is removed and
9 8 revert interactive to commit id 2 (line 3 above), check that folder1/i is removed and
10 9 make workdir match 7
10 9 make workdir match 7
11 10 run the same test as 8 from within folder1 and check same expectations
11 10 run the same test as 8 from within folder1 and check same expectations
12
12
13 $ cat <<EOF >> $HGRCPATH
13 $ cat <<EOF >> $HGRCPATH
14 > [ui]
14 > [ui]
15 > interactive = true
15 > interactive = true
16 > [extensions]
16 > [extensions]
17 > record =
17 > record =
18 > EOF
18 > EOF
19
19
20
20
21 $ mkdir -p a/folder1 a/folder2
21 $ mkdir -p a/folder1 a/folder2
22 $ cd a
22 $ cd a
23 $ hg init
23 $ hg init
24 >>> open('f', 'wb').write("1\n2\n3\n4\n5\n")
24 >>> open('f', 'wb').write("1\n2\n3\n4\n5\n")
25 $ hg add f ; hg commit -m "adding f"
25 $ hg add f ; hg commit -m "adding f"
26 $ cat f > folder1/g ; hg add folder1/g ; hg commit -m "adding folder1/g"
26 $ cat f > folder1/g ; hg add folder1/g ; hg commit -m "adding folder1/g"
27 $ cat f > folder2/h ; hg add folder2/h ; hg commit -m "adding folder2/h"
27 $ cat f > folder2/h ; hg add folder2/h ; hg commit -m "adding folder2/h"
28 $ cat f > folder1/i ; hg add folder1/i ; hg commit -m "adding folder1/i"
28 $ cat f > folder1/i ; hg add folder1/i ; hg commit -m "adding folder1/i"
29 >>> open('f', 'wb').write("a\n1\n2\n3\n4\n5\nb\n")
29 >>> open('f', 'wb').write("a\n1\n2\n3\n4\n5\nb\n")
30 $ hg commit -m "modifying f"
30 $ hg commit -m "modifying f"
31 >>> open('folder1/g', 'wb').write("c\n1\n2\n3\n4\n5\nd\n")
31 >>> open('folder1/g', 'wb').write("c\n1\n2\n3\n4\n5\nd\n")
32 $ hg commit -m "modifying folder1/g"
32 $ hg commit -m "modifying folder1/g"
33 >>> open('folder2/h', 'wb').write("e\n1\n2\n3\n4\n5\nf\n")
33 >>> open('folder2/h', 'wb').write("e\n1\n2\n3\n4\n5\nf\n")
34 $ hg commit -m "modifying folder2/h"
34 $ hg commit -m "modifying folder2/h"
35 $ hg tip
35 $ hg tip
36 changeset: 6:59dd6e4ab63a
36 changeset: 6:59dd6e4ab63a
37 tag: tip
37 tag: tip
38 user: test
38 user: test
39 date: Thu Jan 01 00:00:00 1970 +0000
39 date: Thu Jan 01 00:00:00 1970 +0000
40 summary: modifying folder2/h
40 summary: modifying folder2/h
41
41
42 $ hg revert -i -r 2 --all -- << EOF
42 $ hg revert -i -r 2 --all -- << EOF
43 > y
43 > y
44 > y
44 > y
45 > y
45 > y
46 > y
46 > y
47 > y
47 > y
48 > n
48 > n
49 > n
49 > n
50 > EOF
50 > EOF
51 reverting f
51 reverting f
52 reverting folder1/g (glob)
52 reverting folder1/g (glob)
53 removing folder1/i (glob)
53 removing folder1/i (glob)
54 reverting folder2/h (glob)
54 reverting folder2/h (glob)
55 diff --git a/f b/f
55 diff --git a/f b/f
56 2 hunks, 2 lines changed
56 2 hunks, 2 lines changed
57 examine changes to 'f'? [Ynesfdaq?] y
57 examine changes to 'f'? [Ynesfdaq?] y
58
58
59 @@ -1,6 +1,5 @@
59 @@ -1,6 +1,5 @@
60 -a
60 -a
61 1
61 1
62 2
62 2
63 3
63 3
64 4
64 4
65 5
65 5
66 record change 1/6 to 'f'? [Ynesfdaq?] y
66 record change 1/6 to 'f'? [Ynesfdaq?] y
67
67
68 @@ -2,6 +1,5 @@
68 @@ -2,6 +1,5 @@
69 1
69 1
70 2
70 2
71 3
71 3
72 4
72 4
73 5
73 5
74 -b
74 -b
75 record change 2/6 to 'f'? [Ynesfdaq?] y
75 record change 2/6 to 'f'? [Ynesfdaq?] y
76
76
77 diff --git a/folder1/g b/folder1/g
77 diff --git a/folder1/g b/folder1/g
78 2 hunks, 2 lines changed
78 2 hunks, 2 lines changed
79 examine changes to 'folder1/g'? [Ynesfdaq?] y
79 examine changes to 'folder1/g'? [Ynesfdaq?] y
80
80
81 @@ -1,6 +1,5 @@
81 @@ -1,6 +1,5 @@
82 -c
82 -c
83 1
83 1
84 2
84 2
85 3
85 3
86 4
86 4
87 5
87 5
88 record change 3/6 to 'folder1/g'? [Ynesfdaq?] y
88 record change 3/6 to 'folder1/g'? [Ynesfdaq?] y
89
89
90 @@ -2,6 +1,5 @@
90 @@ -2,6 +1,5 @@
91 1
91 1
92 2
92 2
93 3
93 3
94 4
94 4
95 5
95 5
96 -d
96 -d
97 record change 4/6 to 'folder1/g'? [Ynesfdaq?] n
97 record change 4/6 to 'folder1/g'? [Ynesfdaq?] n
98
98
99 diff --git a/folder2/h b/folder2/h
99 diff --git a/folder2/h b/folder2/h
100 2 hunks, 2 lines changed
100 2 hunks, 2 lines changed
101 examine changes to 'folder2/h'? [Ynesfdaq?] n
101 examine changes to 'folder2/h'? [Ynesfdaq?] n
102
102
103 $ cat f
103 $ cat f
104 1
104 1
105 2
105 2
106 3
106 3
107 4
107 4
108 5
108 5
109 $ cat folder1/g
109 $ cat folder1/g
110 1
110 1
111 2
111 2
112 3
112 3
113 4
113 4
114 5
114 5
115 d
115 d
116 $ cat folder2/h
116 $ cat folder2/h
117 e
117 e
118 1
118 1
119 2
119 2
120 3
120 3
121 4
121 4
122 5
122 5
123 f
123 f
124
124
125 Test that --interactive lifts the need for --all
125 Test that --interactive lifts the need for --all
126
126
127 $ echo q | hg revert -i -r 2
127 $ echo q | hg revert -i -r 2
128 reverting folder1/g (glob)
128 reverting folder1/g (glob)
129 reverting folder2/h (glob)
129 reverting folder2/h (glob)
130 diff --git a/folder1/g b/folder1/g
130 diff --git a/folder1/g b/folder1/g
131 1 hunks, 1 lines changed
131 1 hunks, 1 lines changed
132 examine changes to 'folder1/g'? [Ynesfdaq?] q
132 examine changes to 'folder1/g'? [Ynesfdaq?] q
133
133
134 abort: user quit
134 abort: user quit
135 [255]
135 [255]
136 $ rm folder1/g.orig
136 $ rm folder1/g.orig
137
137
138
138
139 $ hg update -C 6
139 $ hg update -C 6
140 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
140 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 $ hg revert -i -r 2 --all -- << EOF
141 $ hg revert -i -r 2 --all -- << EOF
142 > y
142 > y
143 > y
143 > y
144 > y
144 > y
145 > y
145 > y
146 > y
146 > y
147 > n
147 > n
148 > n
148 > n
149 > EOF
149 > EOF
150 reverting f
150 reverting f
151 reverting folder1/g (glob)
151 reverting folder1/g (glob)
152 removing folder1/i (glob)
152 removing folder1/i (glob)
153 reverting folder2/h (glob)
153 reverting folder2/h (glob)
154 diff --git a/f b/f
154 diff --git a/f b/f
155 2 hunks, 2 lines changed
155 2 hunks, 2 lines changed
156 examine changes to 'f'? [Ynesfdaq?] y
156 examine changes to 'f'? [Ynesfdaq?] y
157
157
158 @@ -1,6 +1,5 @@
158 @@ -1,6 +1,5 @@
159 -a
159 -a
160 1
160 1
161 2
161 2
162 3
162 3
163 4
163 4
164 5
164 5
165 record change 1/6 to 'f'? [Ynesfdaq?] y
165 record change 1/6 to 'f'? [Ynesfdaq?] y
166
166
167 @@ -2,6 +1,5 @@
167 @@ -2,6 +1,5 @@
168 1
168 1
169 2
169 2
170 3
170 3
171 4
171 4
172 5
172 5
173 -b
173 -b
174 record change 2/6 to 'f'? [Ynesfdaq?] y
174 record change 2/6 to 'f'? [Ynesfdaq?] y
175
175
176 diff --git a/folder1/g b/folder1/g
176 diff --git a/folder1/g b/folder1/g
177 2 hunks, 2 lines changed
177 2 hunks, 2 lines changed
178 examine changes to 'folder1/g'? [Ynesfdaq?] y
178 examine changes to 'folder1/g'? [Ynesfdaq?] y
179
179
180 @@ -1,6 +1,5 @@
180 @@ -1,6 +1,5 @@
181 -c
181 -c
182 1
182 1
183 2
183 2
184 3
184 3
185 4
185 4
186 5
186 5
187 record change 3/6 to 'folder1/g'? [Ynesfdaq?] y
187 record change 3/6 to 'folder1/g'? [Ynesfdaq?] y
188
188
189 @@ -2,6 +1,5 @@
189 @@ -2,6 +1,5 @@
190 1
190 1
191 2
191 2
192 3
192 3
193 4
193 4
194 5
194 5
195 -d
195 -d
196 record change 4/6 to 'folder1/g'? [Ynesfdaq?] n
196 record change 4/6 to 'folder1/g'? [Ynesfdaq?] n
197
197
198 diff --git a/folder2/h b/folder2/h
198 diff --git a/folder2/h b/folder2/h
199 2 hunks, 2 lines changed
199 2 hunks, 2 lines changed
200 examine changes to 'folder2/h'? [Ynesfdaq?] n
200 examine changes to 'folder2/h'? [Ynesfdaq?] n
201
201
202 $ cat f
202 $ cat f
203 1
203 1
204 2
204 2
205 3
205 3
206 4
206 4
207 5
207 5
208 $ cat folder1/g
208 $ cat folder1/g
209 1
209 1
210 2
210 2
211 3
211 3
212 4
212 4
213 5
213 5
214 d
214 d
215 $ cat folder2/h
215 $ cat folder2/h
216 e
216 e
217 1
217 1
218 2
218 2
219 3
219 3
220 4
220 4
221 5
221 5
222 f
222 f
223 $ hg st
223 $ hg st
224 M f
224 M f
225 M folder1/g
225 M folder1/g
226 R folder1/i
226 R folder1/i
227 $ hg revert --interactive f << EOF
227 $ hg revert --interactive f << EOF
228 > y
228 > y
229 > y
229 > y
230 > n
230 > n
231 > n
231 > n
232 > EOF
232 > EOF
233 diff --git a/f b/f
233 diff --git a/f b/f
234 2 hunks, 2 lines changed
234 2 hunks, 2 lines changed
235 examine changes to 'f'? [Ynesfdaq?] y
235 examine changes to 'f'? [Ynesfdaq?] y
236
236
237 @@ -1,5 +1,6 @@
237 @@ -1,5 +1,6 @@
238 +a
238 +a
239 1
239 1
240 2
240 2
241 3
241 3
242 4
242 4
243 5
243 5
244 record change 1/2 to 'f'? [Ynesfdaq?] y
244 record change 1/2 to 'f'? [Ynesfdaq?] y
245
245
246 @@ -1,5 +2,6 @@
246 @@ -1,5 +2,6 @@
247 1
247 1
248 2
248 2
249 3
249 3
250 4
250 4
251 5
251 5
252 +b
252 +b
253 record change 2/2 to 'f'? [Ynesfdaq?] n
253 record change 2/2 to 'f'? [Ynesfdaq?] n
254
254
255 $ hg st
255 $ hg st
256 M f
256 M f
257 M folder1/g
257 M folder1/g
258 R folder1/i
258 R folder1/i
259 ? f.orig
259 ? f.orig
260 $ cat f
260 $ cat f
261 a
261 a
262 1
262 1
263 2
263 2
264 3
264 3
265 4
265 4
266 5
266 5
267 $ cat f.orig
267 $ cat f.orig
268 1
268 1
269 2
269 2
270 3
270 3
271 4
271 4
272 5
272 5
273 $ rm f.orig
273 $ rm f.orig
274 $ hg update -C .
274 $ hg update -C .
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
275 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
276
276
277 Check editing files newly added by a revert
277 Check editing files newly added by a revert
278
278
279 1) Create a dummy editor changing 1 to 42
279 1) Create a dummy editor changing 1 to 42
280 $ cat > $TESTTMP/editor.sh << '__EOF__'
280 $ cat > $TESTTMP/editor.sh << '__EOF__'
281 > cat "$1" | sed "s/1/42/g" > tt
281 > cat "$1" | sed "s/1/42/g" > tt
282 > mv tt "$1"
282 > mv tt "$1"
283 > __EOF__
283 > __EOF__
284
284
285 2) Remove f
285 2) Remove f
286 $ hg rm f
286 $ hg rm f
287 $ hg commit -m "remove f"
287 $ hg commit -m "remove f"
288
288
289 3) Do another commit on top
289 3) Do another commit on top
290 $ touch k; hg add k
290 $ touch k; hg add k
291 $ hg commit -m "add k"
291 $ hg commit -m "add k"
292 $ hg st
292 $ hg st
293
293
294 4) Use interactive revert to recover f and change it on the fly
294 4) Use interactive revert to recover f and change it on the fly
295 $ HGEDITOR="\"sh\" \"${TESTTMP}/editor.sh\"" hg revert -i -r ".^^" <<EOF
295 $ HGEDITOR="\"sh\" \"${TESTTMP}/editor.sh\"" hg revert -i -r ".^^" <<EOF
296 > y
296 > y
297 > e
297 > e
298 > EOF
298 > EOF
299 adding f
299 adding f
300 removing k
300 removing k
301 diff --git a/f b/f
301 diff --git a/f b/f
302 new file mode 100644
302 new file mode 100644
303 examine changes to 'f'? [Ynesfdaq?] y
303 examine changes to 'f'? [Ynesfdaq?] y
304
304
305 @@ -0,0 +1,7 @@
305 @@ -0,0 +1,7 @@
306 +a
306 +a
307 +1
307 +1
308 +2
308 +2
309 +3
309 +3
310 +4
310 +4
311 +5
311 +5
312 +b
312 +b
313 record this change to 'f'? [Ynesfdaq?] e
313 record this change to 'f'? [Ynesfdaq?] e
314
314
315 $ cat f
315 $ cat f
316 a
316 a
317 42
317 42
318 2
318 2
319 3
319 3
320 4
320 4
321 5
321 5
322 b
322 b
323
324 Check the experimental config to invert the selection:
325 $ cat <<EOF >> $HGRCPATH
326 > [experimental]
327 > revertalternateinteractivemode=True
328 > EOF
329
330
331 $ hg up -C .
332 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
333 $ printf 'firstline\nc\n1\n2\n3\n 3\n5\nd\nlastline\n' > folder1/g
334 $ hg diff --nodates
335 diff -r 5a858e056dc0 folder1/g
336 --- a/folder1/g
337 +++ b/folder1/g
338 @@ -1,7 +1,9 @@
339 +firstline
340 c
341 1
342 2
343 3
344 -4
345 + 3
346 5
347 d
348 +lastline
349 $ hg revert -i <<EOF
350 > y
351 > y
352 > y
353 > n
354 > EOF
355 reverting folder1/g (glob)
356 diff --git a/folder1/g b/folder1/g
357 3 hunks, 3 lines changed
358 examine changes to 'folder1/g'? [Ynesfdaq?] y
359
360 @@ -1,4 +1,5 @@
361 +firstline
362 c
363 1
364 2
365 3
366 record change 1/3 to 'folder1/g'? [Ynesfdaq?] y
367
368 @@ -1,7 +2,7 @@
369 c
370 1
371 2
372 3
373 -4
374 + 3
375 5
376 d
377 record change 2/3 to 'folder1/g'? [Ynesfdaq?] y
378
379 @@ -6,2 +7,3 @@
380 5
381 d
382 +lastline
383 record change 3/3 to 'folder1/g'? [Ynesfdaq?] n
384
385 $ hg diff --nodates
386 diff -r 5a858e056dc0 folder1/g
387 --- a/folder1/g
388 +++ b/folder1/g
389 @@ -5,3 +5,4 @@
390 4
391 5
392 d
393 +lastline