##// END OF EJS Templates
patch: move 'extract' return to a dictionary...
Pierre-Yves David -
r26547:b9be8ab6 default
parent child Browse files
Show More
@@ -1,3398 +1,3405 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, bin, nullid, nullrev, short
8 from node import hex, bin, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import repair, graphmod, revset, phases, obsolete, pathutil
13 import repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import formatter
17 import formatter
18 import crecord as crecordmod
18 import crecord as crecordmod
19 import lock as lockmod
19 import lock as lockmod
20
20
def ishunk(x):
    """Return True if ``x`` is a record or crecord hunk instance."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
24
24
def newandmodified(chunks, originalchunks):
    """Return the set of filenames whose selected hunks add a new file
    and were edited during recording (i.e. the hunk is not present
    verbatim in ``originalchunks``)."""
    touched = set()
    for chunk in chunks:
        if (ishunk(chunk) and chunk.header.isnewfile()
                and chunk not in originalchunks):
            touched.add(chunk.header.filename())
    return touched
32
32
def parsealiases(cmd):
    """Split a command-table key such as ``"^log|history"`` into its
    list of aliases, dropping the leading '^' marker."""
    return cmd.lstrip("^").split("|")
35
35
def setupwrapcolorwrite(ui):
    """Wrap ``ui.write`` so diff output can be labeled/colorized.

    Returns the original, unwrapped write method so the caller can
    restore it when done.
    """
    def labeledwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrapper(*args, **kwargs):
        return labeledwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrapper)
    return oldwrite
48
48
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Dispatch hunk filtering to the curses UI or the plain-text prompt.

    When *usecurses* is set, a test decorator is installed if *testfile*
    is given; otherwise the interactive curses chunk selector is used.
    """
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        recordfn = crecordmod.testdecorator(testfile,
                                            crecordmod.testchunkselector)
    else:
        recordfn = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
61
61
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter *originalhunks*; return the selected hunks.

    *operation* is used for ui purposes to indicate the user what kind of
    filtering they are doing: reverting, committing, shelving, etc.
    *operation* has to be a translated string.
    """
    usecurses = ui.configbool('experimental', 'crecord', False)
    testfile = ui.config('experimental', 'crecordtest', None)
    # temporarily wrap ui.write for colorized diff output
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile,
                            operation)
    finally:
        ui.write = oldwrite
78
78
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
            filterfn, *pats, **opts):
    """Drive an interactive record/commit session.

    Filters the working-directory changes through *filterfn*, temporarily
    reverts everything else, delegates the actual commit to *commitfunc*,
    and finally restores the unselected changes from backups.
    """
    import merge as mergemod

    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise util.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """Generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare the working directory into a state in which
        the job can be delegated to a non-interactive commit command
        such as 'commit' or 'qrefresh'.

        After the actual job is done by the non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks = filterfn(ui, originalchunks)
        except patch.PatchError as err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # headers expose files(); bare hunks do not
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end
        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles
                        if f in modified or f in newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            for c in newlyaddedandmodifiedfiles:
                os.unlink(repo.wjoin(c))

            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                choices = lambda key: key in backups
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, choices)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError as err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # hold the working-directory lock for the whole record operation
        wlock = repo.wlock()
        try:
            return recordfunc(ui, repo, message, match, opts)
        finally:
            wlock.release()

    return commit(ui, repo, recordinwlock, pats, opts)
242
242
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for tablekey in keys:
        aliases = parsealiases(tablekey)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif not strict:
            # first alias that cmd is a prefix of, if any
            found = next((a for a in aliases if a.startswith(cmd)), None)
        else:
            found = None
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[tablekey])
            else:
                choice[found] = (aliases, table[tablekey])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
280
280
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    if cmd in choice:
        return choice[cmd]

    # several prefix matches: ambiguous
    if len(choice) > 1:
        candidates = choice.keys()
        candidates.sort()
        raise error.AmbiguousCommand(cmd, candidates)

    # exactly one prefix match
    if choice:
        return choice.values()[0]

    raise error.UnknownCommand(cmd, allcmds)
297
297
def findrepo(p):
    """Walk upward from *p* looking for a directory containing '.hg'.

    Return the repository root, or None if the filesystem root is
    reached without finding one.
    """
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            # dirname() reached a fixed point: we hit the root
            return None

    return p
305
305
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True, an outstanding uncommitted merge also aborts.
    Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise util.Abort(_('uncommitted changes'))
    ctx = repo[None]
    for subpath in sorted(ctx.substate):
        ctx.sub(subpath).bailifchanged()
315
315
def logmessage(ui, opts):
    """get the log message according to -m and -l option"""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if not message and logfile:
        try:
            if logfile == '-':
                # read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as err:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, err.strerror))
    return message
334
334
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    suffix = ".merge" if ismerge else ".normal"
    return baseformname + suffix
351
351
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows to change description before
    storing.

    'extramsg' is a extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(r, c, s,
                                                 finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
    if editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    return commiteditor
382
382
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
396
396
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the format string *pat* into an output filename.

    Supported format specifiers:
      %%  literal '%'               %b  basename of the repo root
      %H  full changeset hash       %h  short changeset hash
      %R  changeset revision        %r  zero-padded revision (revwidth)
      %m  description with non-word characters replaced by '_'
      %N  total number of patches   %n  sequence number (zero-padded
                                        to len(str(total)) when both given)
      %s  basename of pathname      %d  dirname of pathname (or '.')
      %p  pathname

    Node-dependent specifiers are only available when *node* is set;
    an unknown or unavailable specifier raises util.Abort.
    """
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: str(repo.changelog.rev(node)),
        'h': lambda: short(node),
        # raw string avoids the invalid-escape pitfall of '[^\w]'
        'm': lambda: re.sub(r'\W', '_', str(desc))
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
            expander['r'] = (lambda:
                    str(repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # pad the sequence number to the width of the total count
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i]
            if c == '%':
                i += 1
                c = pat[i]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
442
442
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open a file-like object selected by the *pat* format string.

    '-' (or an empty pat) means stdout/stdin; an object with a suitable
    write()/read() method is passed through; otherwise the pattern is
    expanded with makefilename() and opened with *mode* (possibly
    remapped through *modemap*).
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)

        # if this fp can't be duped properly, return
        # a dummy object that can be closed
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)

        return wrappedfileobj(fp)

    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        # first write truncates; later writes to the same name append
        if mode == 'wb':
            modemap[fn] = 'ab'
    return open(fn, mode)
480
480
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']

    # validate the option combination up front
    msg = None
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            if 'treemanifest' not in repo.requirements:
                raise util.Abort(_("--dir can only be used on repos with "
                                   "treemanifest enabled"))
            dirlog = repo.dirlog(file_)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    if not r:
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        # open a raw revlog file directly, bypassing repo auditing
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
525
525
526 def copy(ui, repo, pats, opts, rename=False):
526 def copy(ui, repo, pats, opts, rename=False):
527 # called with the repo lock held
527 # called with the repo lock held
528 #
528 #
529 # hgsep => pathname that uses "/" to separate directories
529 # hgsep => pathname that uses "/" to separate directories
530 # ossep => pathname that uses os.sep to separate directories
530 # ossep => pathname that uses os.sep to separate directories
531 cwd = repo.getcwd()
531 cwd = repo.getcwd()
532 targets = {}
532 targets = {}
533 after = opts.get("after")
533 after = opts.get("after")
534 dryrun = opts.get("dry_run")
534 dryrun = opts.get("dry_run")
535 wctx = repo[None]
535 wctx = repo[None]
536
536
537 def walkpat(pat):
537 def walkpat(pat):
538 srcs = []
538 srcs = []
539 if after:
539 if after:
540 badstates = '?'
540 badstates = '?'
541 else:
541 else:
542 badstates = '?r'
542 badstates = '?r'
543 m = scmutil.match(repo[None], [pat], opts, globbed=True)
543 m = scmutil.match(repo[None], [pat], opts, globbed=True)
544 for abs in repo.walk(m):
544 for abs in repo.walk(m):
545 state = repo.dirstate[abs]
545 state = repo.dirstate[abs]
546 rel = m.rel(abs)
546 rel = m.rel(abs)
547 exact = m.exact(abs)
547 exact = m.exact(abs)
548 if state in badstates:
548 if state in badstates:
549 if exact and state == '?':
549 if exact and state == '?':
550 ui.warn(_('%s: not copying - file is not managed\n') % rel)
550 ui.warn(_('%s: not copying - file is not managed\n') % rel)
551 if exact and state == 'r':
551 if exact and state == 'r':
552 ui.warn(_('%s: not copying - file has been marked for'
552 ui.warn(_('%s: not copying - file has been marked for'
553 ' remove\n') % rel)
553 ' remove\n') % rel)
554 continue
554 continue
555 # abs: hgsep
555 # abs: hgsep
556 # rel: ossep
556 # rel: ossep
557 srcs.append((abs, rel, exact))
557 srcs.append((abs, rel, exact))
558 return srcs
558 return srcs
559
559
560 # abssrc: hgsep
560 # abssrc: hgsep
561 # relsrc: ossep
561 # relsrc: ossep
562 # otarget: ossep
562 # otarget: ossep
563 def copyfile(abssrc, relsrc, otarget, exact):
563 def copyfile(abssrc, relsrc, otarget, exact):
564 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
564 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
565 if '/' in abstarget:
565 if '/' in abstarget:
566 # We cannot normalize abstarget itself, this would prevent
566 # We cannot normalize abstarget itself, this would prevent
567 # case only renames, like a => A.
567 # case only renames, like a => A.
568 abspath, absname = abstarget.rsplit('/', 1)
568 abspath, absname = abstarget.rsplit('/', 1)
569 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
569 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
570 reltarget = repo.pathto(abstarget, cwd)
570 reltarget = repo.pathto(abstarget, cwd)
571 target = repo.wjoin(abstarget)
571 target = repo.wjoin(abstarget)
572 src = repo.wjoin(abssrc)
572 src = repo.wjoin(abssrc)
573 state = repo.dirstate[abstarget]
573 state = repo.dirstate[abstarget]
574
574
575 scmutil.checkportable(ui, abstarget)
575 scmutil.checkportable(ui, abstarget)
576
576
577 # check for collisions
577 # check for collisions
578 prevsrc = targets.get(abstarget)
578 prevsrc = targets.get(abstarget)
579 if prevsrc is not None:
579 if prevsrc is not None:
580 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
580 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
581 (reltarget, repo.pathto(abssrc, cwd),
581 (reltarget, repo.pathto(abssrc, cwd),
582 repo.pathto(prevsrc, cwd)))
582 repo.pathto(prevsrc, cwd)))
583 return
583 return
584
584
585 # check for overwrites
585 # check for overwrites
586 exists = os.path.lexists(target)
586 exists = os.path.lexists(target)
587 samefile = False
587 samefile = False
588 if exists and abssrc != abstarget:
588 if exists and abssrc != abstarget:
589 if (repo.dirstate.normalize(abssrc) ==
589 if (repo.dirstate.normalize(abssrc) ==
590 repo.dirstate.normalize(abstarget)):
590 repo.dirstate.normalize(abstarget)):
591 if not rename:
591 if not rename:
592 ui.warn(_("%s: can't copy - same file\n") % reltarget)
592 ui.warn(_("%s: can't copy - same file\n") % reltarget)
593 return
593 return
594 exists = False
594 exists = False
595 samefile = True
595 samefile = True
596
596
597 if not after and exists or after and state in 'mn':
597 if not after and exists or after and state in 'mn':
598 if not opts['force']:
598 if not opts['force']:
599 ui.warn(_('%s: not overwriting - file exists\n') %
599 ui.warn(_('%s: not overwriting - file exists\n') %
600 reltarget)
600 reltarget)
601 return
601 return
602
602
603 if after:
603 if after:
604 if not exists:
604 if not exists:
605 if rename:
605 if rename:
606 ui.warn(_('%s: not recording move - %s does not exist\n') %
606 ui.warn(_('%s: not recording move - %s does not exist\n') %
607 (relsrc, reltarget))
607 (relsrc, reltarget))
608 else:
608 else:
609 ui.warn(_('%s: not recording copy - %s does not exist\n') %
609 ui.warn(_('%s: not recording copy - %s does not exist\n') %
610 (relsrc, reltarget))
610 (relsrc, reltarget))
611 return
611 return
612 elif not dryrun:
612 elif not dryrun:
613 try:
613 try:
614 if exists:
614 if exists:
615 os.unlink(target)
615 os.unlink(target)
616 targetdir = os.path.dirname(target) or '.'
616 targetdir = os.path.dirname(target) or '.'
617 if not os.path.isdir(targetdir):
617 if not os.path.isdir(targetdir):
618 os.makedirs(targetdir)
618 os.makedirs(targetdir)
619 if samefile:
619 if samefile:
620 tmp = target + "~hgrename"
620 tmp = target + "~hgrename"
621 os.rename(src, tmp)
621 os.rename(src, tmp)
622 os.rename(tmp, target)
622 os.rename(tmp, target)
623 else:
623 else:
624 util.copyfile(src, target)
624 util.copyfile(src, target)
625 srcexists = True
625 srcexists = True
626 except IOError as inst:
626 except IOError as inst:
627 if inst.errno == errno.ENOENT:
627 if inst.errno == errno.ENOENT:
628 ui.warn(_('%s: deleted in working directory\n') % relsrc)
628 ui.warn(_('%s: deleted in working directory\n') % relsrc)
629 srcexists = False
629 srcexists = False
630 else:
630 else:
631 ui.warn(_('%s: cannot copy - %s\n') %
631 ui.warn(_('%s: cannot copy - %s\n') %
632 (relsrc, inst.strerror))
632 (relsrc, inst.strerror))
633 return True # report a failure
633 return True # report a failure
634
634
635 if ui.verbose or not exact:
635 if ui.verbose or not exact:
636 if rename:
636 if rename:
637 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
637 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
638 else:
638 else:
639 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
639 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
640
640
641 targets[abstarget] = abssrc
641 targets[abstarget] = abssrc
642
642
643 # fix up dirstate
643 # fix up dirstate
644 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
644 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
645 dryrun=dryrun, cwd=cwd)
645 dryrun=dryrun, cwd=cwd)
646 if rename and not dryrun:
646 if rename and not dryrun:
647 if not after and srcexists and not samefile:
647 if not after and srcexists and not samefile:
648 util.unlinkpath(repo.wjoin(abssrc))
648 util.unlinkpath(repo.wjoin(abssrc))
649 wctx.forget([abssrc])
649 wctx.forget([abssrc])
650
650
651 # pat: ossep
651 # pat: ossep
652 # dest ossep
652 # dest ossep
653 # srcs: list of (hgsep, hgsep, ossep, bool)
653 # srcs: list of (hgsep, hgsep, ossep, bool)
654 # return: function that takes hgsep and returns ossep
654 # return: function that takes hgsep and returns ossep
655 def targetpathfn(pat, dest, srcs):
655 def targetpathfn(pat, dest, srcs):
656 if os.path.isdir(pat):
656 if os.path.isdir(pat):
657 abspfx = pathutil.canonpath(repo.root, cwd, pat)
657 abspfx = pathutil.canonpath(repo.root, cwd, pat)
658 abspfx = util.localpath(abspfx)
658 abspfx = util.localpath(abspfx)
659 if destdirexists:
659 if destdirexists:
660 striplen = len(os.path.split(abspfx)[0])
660 striplen = len(os.path.split(abspfx)[0])
661 else:
661 else:
662 striplen = len(abspfx)
662 striplen = len(abspfx)
663 if striplen:
663 if striplen:
664 striplen += len(os.sep)
664 striplen += len(os.sep)
665 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
665 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
666 elif destdirexists:
666 elif destdirexists:
667 res = lambda p: os.path.join(dest,
667 res = lambda p: os.path.join(dest,
668 os.path.basename(util.localpath(p)))
668 os.path.basename(util.localpath(p)))
669 else:
669 else:
670 res = lambda p: dest
670 res = lambda p: dest
671 return res
671 return res
672
672
673 # pat: ossep
673 # pat: ossep
674 # dest ossep
674 # dest ossep
675 # srcs: list of (hgsep, hgsep, ossep, bool)
675 # srcs: list of (hgsep, hgsep, ossep, bool)
676 # return: function that takes hgsep and returns ossep
676 # return: function that takes hgsep and returns ossep
677 def targetpathafterfn(pat, dest, srcs):
677 def targetpathafterfn(pat, dest, srcs):
678 if matchmod.patkind(pat):
678 if matchmod.patkind(pat):
679 # a mercurial pattern
679 # a mercurial pattern
680 res = lambda p: os.path.join(dest,
680 res = lambda p: os.path.join(dest,
681 os.path.basename(util.localpath(p)))
681 os.path.basename(util.localpath(p)))
682 else:
682 else:
683 abspfx = pathutil.canonpath(repo.root, cwd, pat)
683 abspfx = pathutil.canonpath(repo.root, cwd, pat)
684 if len(abspfx) < len(srcs[0][0]):
684 if len(abspfx) < len(srcs[0][0]):
685 # A directory. Either the target path contains the last
685 # A directory. Either the target path contains the last
686 # component of the source path or it does not.
686 # component of the source path or it does not.
687 def evalpath(striplen):
687 def evalpath(striplen):
688 score = 0
688 score = 0
689 for s in srcs:
689 for s in srcs:
690 t = os.path.join(dest, util.localpath(s[0])[striplen:])
690 t = os.path.join(dest, util.localpath(s[0])[striplen:])
691 if os.path.lexists(t):
691 if os.path.lexists(t):
692 score += 1
692 score += 1
693 return score
693 return score
694
694
695 abspfx = util.localpath(abspfx)
695 abspfx = util.localpath(abspfx)
696 striplen = len(abspfx)
696 striplen = len(abspfx)
697 if striplen:
697 if striplen:
698 striplen += len(os.sep)
698 striplen += len(os.sep)
699 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
699 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
700 score = evalpath(striplen)
700 score = evalpath(striplen)
701 striplen1 = len(os.path.split(abspfx)[0])
701 striplen1 = len(os.path.split(abspfx)[0])
702 if striplen1:
702 if striplen1:
703 striplen1 += len(os.sep)
703 striplen1 += len(os.sep)
704 if evalpath(striplen1) > score:
704 if evalpath(striplen1) > score:
705 striplen = striplen1
705 striplen = striplen1
706 res = lambda p: os.path.join(dest,
706 res = lambda p: os.path.join(dest,
707 util.localpath(p)[striplen:])
707 util.localpath(p)[striplen:])
708 else:
708 else:
709 # a file
709 # a file
710 if destdirexists:
710 if destdirexists:
711 res = lambda p: os.path.join(dest,
711 res = lambda p: os.path.join(dest,
712 os.path.basename(util.localpath(p)))
712 os.path.basename(util.localpath(p)))
713 else:
713 else:
714 res = lambda p: dest
714 res = lambda p: dest
715 return res
715 return res
716
716
717 pats = scmutil.expandpats(pats)
717 pats = scmutil.expandpats(pats)
718 if not pats:
718 if not pats:
719 raise util.Abort(_('no source or destination specified'))
719 raise util.Abort(_('no source or destination specified'))
720 if len(pats) == 1:
720 if len(pats) == 1:
721 raise util.Abort(_('no destination specified'))
721 raise util.Abort(_('no destination specified'))
722 dest = pats.pop()
722 dest = pats.pop()
723 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
723 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
724 if not destdirexists:
724 if not destdirexists:
725 if len(pats) > 1 or matchmod.patkind(pats[0]):
725 if len(pats) > 1 or matchmod.patkind(pats[0]):
726 raise util.Abort(_('with multiple sources, destination must be an '
726 raise util.Abort(_('with multiple sources, destination must be an '
727 'existing directory'))
727 'existing directory'))
728 if util.endswithsep(dest):
728 if util.endswithsep(dest):
729 raise util.Abort(_('destination %s is not a directory') % dest)
729 raise util.Abort(_('destination %s is not a directory') % dest)
730
730
731 tfn = targetpathfn
731 tfn = targetpathfn
732 if after:
732 if after:
733 tfn = targetpathafterfn
733 tfn = targetpathafterfn
734 copylist = []
734 copylist = []
735 for pat in pats:
735 for pat in pats:
736 srcs = walkpat(pat)
736 srcs = walkpat(pat)
737 if not srcs:
737 if not srcs:
738 continue
738 continue
739 copylist.append((tfn(pat, dest, srcs), srcs))
739 copylist.append((tfn(pat, dest, srcs), srcs))
740 if not copylist:
740 if not copylist:
741 raise util.Abort(_('no files to copy'))
741 raise util.Abort(_('no files to copy'))
742
742
743 errors = 0
743 errors = 0
744 for targetpath, srcs in copylist:
744 for targetpath, srcs in copylist:
745 for abssrc, relsrc, exact in srcs:
745 for abssrc, relsrc, exact in srcs:
746 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
746 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
747 errors += 1
747 errors += 1
748
748
749 if errors:
749 if errors:
750 ui.warn(_('(consider using --after)\n'))
750 ui.warn(_('(consider using --after)\n'))
751
751
752 return errors != 0
752 return errors != 0
753
753
754 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
754 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
755 runargs=None, appendpid=False):
755 runargs=None, appendpid=False):
756 '''Run a command as a service.'''
756 '''Run a command as a service.'''
757
757
758 def writepid(pid):
758 def writepid(pid):
759 if opts['pid_file']:
759 if opts['pid_file']:
760 if appendpid:
760 if appendpid:
761 mode = 'a'
761 mode = 'a'
762 else:
762 else:
763 mode = 'w'
763 mode = 'w'
764 fp = open(opts['pid_file'], mode)
764 fp = open(opts['pid_file'], mode)
765 fp.write(str(pid) + '\n')
765 fp.write(str(pid) + '\n')
766 fp.close()
766 fp.close()
767
767
768 if opts['daemon'] and not opts['daemon_pipefds']:
768 if opts['daemon'] and not opts['daemon_pipefds']:
769 # Signal child process startup with file removal
769 # Signal child process startup with file removal
770 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
770 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
771 os.close(lockfd)
771 os.close(lockfd)
772 try:
772 try:
773 if not runargs:
773 if not runargs:
774 runargs = util.hgcmd() + sys.argv[1:]
774 runargs = util.hgcmd() + sys.argv[1:]
775 runargs.append('--daemon-pipefds=%s' % lockpath)
775 runargs.append('--daemon-pipefds=%s' % lockpath)
776 # Don't pass --cwd to the child process, because we've already
776 # Don't pass --cwd to the child process, because we've already
777 # changed directory.
777 # changed directory.
778 for i in xrange(1, len(runargs)):
778 for i in xrange(1, len(runargs)):
779 if runargs[i].startswith('--cwd='):
779 if runargs[i].startswith('--cwd='):
780 del runargs[i]
780 del runargs[i]
781 break
781 break
782 elif runargs[i].startswith('--cwd'):
782 elif runargs[i].startswith('--cwd'):
783 del runargs[i:i + 2]
783 del runargs[i:i + 2]
784 break
784 break
785 def condfn():
785 def condfn():
786 return not os.path.exists(lockpath)
786 return not os.path.exists(lockpath)
787 pid = util.rundetached(runargs, condfn)
787 pid = util.rundetached(runargs, condfn)
788 if pid < 0:
788 if pid < 0:
789 raise util.Abort(_('child process failed to start'))
789 raise util.Abort(_('child process failed to start'))
790 writepid(pid)
790 writepid(pid)
791 finally:
791 finally:
792 try:
792 try:
793 os.unlink(lockpath)
793 os.unlink(lockpath)
794 except OSError as e:
794 except OSError as e:
795 if e.errno != errno.ENOENT:
795 if e.errno != errno.ENOENT:
796 raise
796 raise
797 if parentfn:
797 if parentfn:
798 return parentfn(pid)
798 return parentfn(pid)
799 else:
799 else:
800 return
800 return
801
801
802 if initfn:
802 if initfn:
803 initfn()
803 initfn()
804
804
805 if not opts['daemon']:
805 if not opts['daemon']:
806 writepid(os.getpid())
806 writepid(os.getpid())
807
807
808 if opts['daemon_pipefds']:
808 if opts['daemon_pipefds']:
809 lockpath = opts['daemon_pipefds']
809 lockpath = opts['daemon_pipefds']
810 try:
810 try:
811 os.setsid()
811 os.setsid()
812 except AttributeError:
812 except AttributeError:
813 pass
813 pass
814 os.unlink(lockpath)
814 os.unlink(lockpath)
815 util.hidewindow()
815 util.hidewindow()
816 sys.stdout.flush()
816 sys.stdout.flush()
817 sys.stderr.flush()
817 sys.stderr.flush()
818
818
819 nullfd = os.open(os.devnull, os.O_RDWR)
819 nullfd = os.open(os.devnull, os.O_RDWR)
820 logfilefd = nullfd
820 logfilefd = nullfd
821 if logfile:
821 if logfile:
822 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
822 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
823 os.dup2(nullfd, 0)
823 os.dup2(nullfd, 0)
824 os.dup2(logfilefd, 1)
824 os.dup2(logfilefd, 1)
825 os.dup2(logfilefd, 2)
825 os.dup2(logfilefd, 2)
826 if nullfd not in (0, 1, 2):
826 if nullfd not in (0, 1, 2):
827 os.close(nullfd)
827 os.close(nullfd)
828 if logfile and logfilefd not in (0, 1, 2):
828 if logfile and logfilefd not in (0, 1, 2):
829 os.close(logfilefd)
829 os.close(logfilefd)
830
830
831 if runfn:
831 if runfn:
832 return runfn()
832 return runfn()
833
833
834 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
834 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
835 """Utility function used by commands.import to import a single patch
835 """Utility function used by commands.import to import a single patch
836
836
837 This function is explicitly defined here to help the evolve extension to
837 This function is explicitly defined here to help the evolve extension to
838 wrap this part of the import logic.
838 wrap this part of the import logic.
839
839
840 The API is currently a bit ugly because it a simple code translation from
840 The API is currently a bit ugly because it a simple code translation from
841 the import command. Feel free to make it better.
841 the import command. Feel free to make it better.
842
842
843 :hunk: a patch (as a binary string)
843 :hunk: a patch (as a binary string)
844 :parents: nodes that will be parent of the created commit
844 :parents: nodes that will be parent of the created commit
845 :opts: the full dict of option passed to the import command
845 :opts: the full dict of option passed to the import command
846 :msgs: list to save commit message to.
846 :msgs: list to save commit message to.
847 (used in case we need to save it when failing)
847 (used in case we need to save it when failing)
848 :updatefunc: a function that update a repo to a given node
848 :updatefunc: a function that update a repo to a given node
849 updatefunc(<repo>, <node>)
849 updatefunc(<repo>, <node>)
850 """
850 """
851 # avoid cycle context -> subrepo -> cmdutil
851 # avoid cycle context -> subrepo -> cmdutil
852 import context
852 import context
853 tmpname, message, user, date, branch, nodeid, p1, p2 = \
853 extractdata = patch.extract(ui, hunk)
854 patch.extract(ui, hunk)
854 tmpname = extractdata.get('filename')
855 message = extractdata.get('message')
856 user = extractdata.get('user')
857 date = extractdata.get('date')
858 branch = extractdata.get('branch')
859 nodeid = extractdata.get('nodeid')
860 p1 = extractdata.get('p1')
861 p2 = extractdata.get('p2')
855
862
856 update = not opts.get('bypass')
863 update = not opts.get('bypass')
857 strip = opts["strip"]
864 strip = opts["strip"]
858 prefix = opts["prefix"]
865 prefix = opts["prefix"]
859 sim = float(opts.get('similarity') or 0)
866 sim = float(opts.get('similarity') or 0)
860 if not tmpname:
867 if not tmpname:
861 return (None, None, False)
868 return (None, None, False)
862 msg = _('applied to working directory')
869 msg = _('applied to working directory')
863
870
864 rejects = False
871 rejects = False
865 dsguard = None
872 dsguard = None
866
873
867 try:
874 try:
868 cmdline_message = logmessage(ui, opts)
875 cmdline_message = logmessage(ui, opts)
869 if cmdline_message:
876 if cmdline_message:
870 # pickup the cmdline msg
877 # pickup the cmdline msg
871 message = cmdline_message
878 message = cmdline_message
872 elif message:
879 elif message:
873 # pickup the patch msg
880 # pickup the patch msg
874 message = message.strip()
881 message = message.strip()
875 else:
882 else:
876 # launch the editor
883 # launch the editor
877 message = None
884 message = None
878 ui.debug('message:\n%s\n' % message)
885 ui.debug('message:\n%s\n' % message)
879
886
880 if len(parents) == 1:
887 if len(parents) == 1:
881 parents.append(repo[nullid])
888 parents.append(repo[nullid])
882 if opts.get('exact'):
889 if opts.get('exact'):
883 if not nodeid or not p1:
890 if not nodeid or not p1:
884 raise util.Abort(_('not a Mercurial patch'))
891 raise util.Abort(_('not a Mercurial patch'))
885 p1 = repo[p1]
892 p1 = repo[p1]
886 p2 = repo[p2 or nullid]
893 p2 = repo[p2 or nullid]
887 elif p2:
894 elif p2:
888 try:
895 try:
889 p1 = repo[p1]
896 p1 = repo[p1]
890 p2 = repo[p2]
897 p2 = repo[p2]
891 # Without any options, consider p2 only if the
898 # Without any options, consider p2 only if the
892 # patch is being applied on top of the recorded
899 # patch is being applied on top of the recorded
893 # first parent.
900 # first parent.
894 if p1 != parents[0]:
901 if p1 != parents[0]:
895 p1 = parents[0]
902 p1 = parents[0]
896 p2 = repo[nullid]
903 p2 = repo[nullid]
897 except error.RepoError:
904 except error.RepoError:
898 p1, p2 = parents
905 p1, p2 = parents
899 if p2.node() == nullid:
906 if p2.node() == nullid:
900 ui.warn(_("warning: import the patch as a normal revision\n"
907 ui.warn(_("warning: import the patch as a normal revision\n"
901 "(use --exact to import the patch as a merge)\n"))
908 "(use --exact to import the patch as a merge)\n"))
902 else:
909 else:
903 p1, p2 = parents
910 p1, p2 = parents
904
911
905 n = None
912 n = None
906 if update:
913 if update:
907 dsguard = dirstateguard(repo, 'tryimportone')
914 dsguard = dirstateguard(repo, 'tryimportone')
908 if p1 != parents[0]:
915 if p1 != parents[0]:
909 updatefunc(repo, p1.node())
916 updatefunc(repo, p1.node())
910 if p2 != parents[1]:
917 if p2 != parents[1]:
911 repo.setparents(p1.node(), p2.node())
918 repo.setparents(p1.node(), p2.node())
912
919
913 if opts.get('exact') or opts.get('import_branch'):
920 if opts.get('exact') or opts.get('import_branch'):
914 repo.dirstate.setbranch(branch or 'default')
921 repo.dirstate.setbranch(branch or 'default')
915
922
916 partial = opts.get('partial', False)
923 partial = opts.get('partial', False)
917 files = set()
924 files = set()
918 try:
925 try:
919 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
926 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
920 files=files, eolmode=None, similarity=sim / 100.0)
927 files=files, eolmode=None, similarity=sim / 100.0)
921 except patch.PatchError as e:
928 except patch.PatchError as e:
922 if not partial:
929 if not partial:
923 raise util.Abort(str(e))
930 raise util.Abort(str(e))
924 if partial:
931 if partial:
925 rejects = True
932 rejects = True
926
933
927 files = list(files)
934 files = list(files)
928 if opts.get('no_commit'):
935 if opts.get('no_commit'):
929 if message:
936 if message:
930 msgs.append(message)
937 msgs.append(message)
931 else:
938 else:
932 if opts.get('exact') or p2:
939 if opts.get('exact') or p2:
933 # If you got here, you either use --force and know what
940 # If you got here, you either use --force and know what
934 # you are doing or used --exact or a merge patch while
941 # you are doing or used --exact or a merge patch while
935 # being updated to its first parent.
942 # being updated to its first parent.
936 m = None
943 m = None
937 else:
944 else:
938 m = scmutil.matchfiles(repo, files or [])
945 m = scmutil.matchfiles(repo, files or [])
939 editform = mergeeditform(repo[None], 'import.normal')
946 editform = mergeeditform(repo[None], 'import.normal')
940 if opts.get('exact'):
947 if opts.get('exact'):
941 editor = None
948 editor = None
942 else:
949 else:
943 editor = getcommiteditor(editform=editform, **opts)
950 editor = getcommiteditor(editform=editform, **opts)
944 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
951 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
945 try:
952 try:
946 if partial:
953 if partial:
947 repo.ui.setconfig('ui', 'allowemptycommit', True)
954 repo.ui.setconfig('ui', 'allowemptycommit', True)
948 n = repo.commit(message, opts.get('user') or user,
955 n = repo.commit(message, opts.get('user') or user,
949 opts.get('date') or date, match=m,
956 opts.get('date') or date, match=m,
950 editor=editor)
957 editor=editor)
951 finally:
958 finally:
952 repo.ui.restoreconfig(allowemptyback)
959 repo.ui.restoreconfig(allowemptyback)
953 dsguard.close()
960 dsguard.close()
954 else:
961 else:
955 if opts.get('exact') or opts.get('import_branch'):
962 if opts.get('exact') or opts.get('import_branch'):
956 branch = branch or 'default'
963 branch = branch or 'default'
957 else:
964 else:
958 branch = p1.branch()
965 branch = p1.branch()
959 store = patch.filestore()
966 store = patch.filestore()
960 try:
967 try:
961 files = set()
968 files = set()
962 try:
969 try:
963 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
970 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
964 files, eolmode=None)
971 files, eolmode=None)
965 except patch.PatchError as e:
972 except patch.PatchError as e:
966 raise util.Abort(str(e))
973 raise util.Abort(str(e))
967 if opts.get('exact'):
974 if opts.get('exact'):
968 editor = None
975 editor = None
969 else:
976 else:
970 editor = getcommiteditor(editform='import.bypass')
977 editor = getcommiteditor(editform='import.bypass')
971 memctx = context.makememctx(repo, (p1.node(), p2.node()),
978 memctx = context.makememctx(repo, (p1.node(), p2.node()),
972 message,
979 message,
973 opts.get('user') or user,
980 opts.get('user') or user,
974 opts.get('date') or date,
981 opts.get('date') or date,
975 branch, files, store,
982 branch, files, store,
976 editor=editor)
983 editor=editor)
977 n = memctx.commit()
984 n = memctx.commit()
978 finally:
985 finally:
979 store.close()
986 store.close()
980 if opts.get('exact') and opts.get('no_commit'):
987 if opts.get('exact') and opts.get('no_commit'):
981 # --exact with --no-commit is still useful in that it does merge
988 # --exact with --no-commit is still useful in that it does merge
982 # and branch bits
989 # and branch bits
983 ui.warn(_("warning: can't check exact import with --no-commit\n"))
990 ui.warn(_("warning: can't check exact import with --no-commit\n"))
984 elif opts.get('exact') and hex(n) != nodeid:
991 elif opts.get('exact') and hex(n) != nodeid:
985 raise util.Abort(_('patch is damaged or loses information'))
992 raise util.Abort(_('patch is damaged or loses information'))
986 if n:
993 if n:
987 # i18n: refers to a short changeset id
994 # i18n: refers to a short changeset id
988 msg = _('created %s') % short(n)
995 msg = _('created %s') % short(n)
989 return (msg, n, rejects)
996 return (msg, n, rejects)
990 finally:
997 finally:
991 lockmod.release(dsguard)
998 lockmod.release(dsguard)
992 os.unlink(tmpname)
999 os.unlink(tmpname)
993
1000
994 # facility to let extensions include additional data in an exported patch
1001 # facility to let extensions include additional data in an exported patch
995 # list of identifiers to be executed in order
1002 # list of identifiers to be executed in order
996 extraexport = []
1003 extraexport = []
997 # mapping from identifier to actual export function
1004 # mapping from identifier to actual export function
998 # function as to return a string to be added to the header or None
1005 # function as to return a string to be added to the header or None
999 # it is given two arguments (sequencenumber, changectx)
1006 # it is given two arguments (sequencenumber, changectx)
1000 extraexportmap = {}
1007 extraexportmap = {}
1001
1008
1002 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1009 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
1003 opts=None, match=None):
1010 opts=None, match=None):
1004 '''export changesets as hg patches.'''
1011 '''export changesets as hg patches.'''
1005
1012
1006 total = len(revs)
1013 total = len(revs)
1007 revwidth = max([len(str(rev)) for rev in revs])
1014 revwidth = max([len(str(rev)) for rev in revs])
1008 filemode = {}
1015 filemode = {}
1009
1016
1010 def single(rev, seqno, fp):
1017 def single(rev, seqno, fp):
1011 ctx = repo[rev]
1018 ctx = repo[rev]
1012 node = ctx.node()
1019 node = ctx.node()
1013 parents = [p.node() for p in ctx.parents() if p]
1020 parents = [p.node() for p in ctx.parents() if p]
1014 branch = ctx.branch()
1021 branch = ctx.branch()
1015 if switch_parent:
1022 if switch_parent:
1016 parents.reverse()
1023 parents.reverse()
1017
1024
1018 if parents:
1025 if parents:
1019 prev = parents[0]
1026 prev = parents[0]
1020 else:
1027 else:
1021 prev = nullid
1028 prev = nullid
1022
1029
1023 shouldclose = False
1030 shouldclose = False
1024 if not fp and len(template) > 0:
1031 if not fp and len(template) > 0:
1025 desc_lines = ctx.description().rstrip().split('\n')
1032 desc_lines = ctx.description().rstrip().split('\n')
1026 desc = desc_lines[0] #Commit always has a first line.
1033 desc = desc_lines[0] #Commit always has a first line.
1027 fp = makefileobj(repo, template, node, desc=desc, total=total,
1034 fp = makefileobj(repo, template, node, desc=desc, total=total,
1028 seqno=seqno, revwidth=revwidth, mode='wb',
1035 seqno=seqno, revwidth=revwidth, mode='wb',
1029 modemap=filemode)
1036 modemap=filemode)
1030 if fp != template:
1037 if fp != template:
1031 shouldclose = True
1038 shouldclose = True
1032 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
1039 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
1033 repo.ui.note("%s\n" % fp.name)
1040 repo.ui.note("%s\n" % fp.name)
1034
1041
1035 if not fp:
1042 if not fp:
1036 write = repo.ui.write
1043 write = repo.ui.write
1037 else:
1044 else:
1038 def write(s, **kw):
1045 def write(s, **kw):
1039 fp.write(s)
1046 fp.write(s)
1040
1047
1041 write("# HG changeset patch\n")
1048 write("# HG changeset patch\n")
1042 write("# User %s\n" % ctx.user())
1049 write("# User %s\n" % ctx.user())
1043 write("# Date %d %d\n" % ctx.date())
1050 write("# Date %d %d\n" % ctx.date())
1044 write("# %s\n" % util.datestr(ctx.date()))
1051 write("# %s\n" % util.datestr(ctx.date()))
1045 if branch and branch != 'default':
1052 if branch and branch != 'default':
1046 write("# Branch %s\n" % branch)
1053 write("# Branch %s\n" % branch)
1047 write("# Node ID %s\n" % hex(node))
1054 write("# Node ID %s\n" % hex(node))
1048 write("# Parent %s\n" % hex(prev))
1055 write("# Parent %s\n" % hex(prev))
1049 if len(parents) > 1:
1056 if len(parents) > 1:
1050 write("# Parent %s\n" % hex(parents[1]))
1057 write("# Parent %s\n" % hex(parents[1]))
1051
1058
1052 for headerid in extraexport:
1059 for headerid in extraexport:
1053 header = extraexportmap[headerid](seqno, ctx)
1060 header = extraexportmap[headerid](seqno, ctx)
1054 if header is not None:
1061 if header is not None:
1055 write('# %s\n' % header)
1062 write('# %s\n' % header)
1056 write(ctx.description().rstrip())
1063 write(ctx.description().rstrip())
1057 write("\n\n")
1064 write("\n\n")
1058
1065
1059 for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
1066 for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
1060 write(chunk, label=label)
1067 write(chunk, label=label)
1061
1068
1062 if shouldclose:
1069 if shouldclose:
1063 fp.close()
1070 fp.close()
1064
1071
1065 for seqno, rev in enumerate(revs):
1072 for seqno, rev in enumerate(revs):
1066 single(rev, seqno + 1, fp)
1073 single(rev, seqno + 1, fp)
1067
1074
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Writes either a full diff (stat=False) or a diffstat summary
    (stat=True) between node1 and node2 to the ui, or to ``fp`` when one
    is given.  ``root`` restricts output to paths under that directory
    (relative to the cwd); ``listsubrepos`` recurses into subrepos that
    match.
    '''
    # route output through fp when provided; the wrapper drops the
    # label= keyword that ui.write understands but plain files do not
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        # warn about match patterns that the relative root silently excludes
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # diffstat needs no context lines; recomputing with context=0 is
        # cheaper than diffing with full context and throwing it away
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1125
1132
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # per-rev buffered output, keyed by revision number
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        """Emit any buffered header/hunk for ctx.

        Returns 1 if a hunk was written, 0 otherwise."""
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            # only repeat the header when it actually changes
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # emit the accumulated footer, if any
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        """Show one changeset, buffering output when self.buffered is set."""
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset: %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch: %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase: %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent: %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        # manifest is only meaningful (and available) for committed revs
        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user: %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date: %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # modified/added/removed relative to first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies: %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        """Write the diffstat and/or patch for node, per self.diffopts."""
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                # blank line between diffstat and the patch itself
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write("\n")
1294
1301
class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # tracks whether the opening "[" of the JSON array is still pending
        self._first = True

    def close(self):
        # terminate the JSON array; emit "[]" when nothing was shown
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        # the working directory has no rev/node; serialize as JSON null
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n "rev": %s' % jrev)
            self.ui.write(',\n "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n "rev": %s' % jrev)
        self.ui.write(',\n "node": %s' % jnode)
        self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n "manifest": %s' % jmanifestnode)

            self.ui.write(',\n "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            # status vs. first parent: (modified, added, removed, ...)
            files = ctx.p1().status(ctx)
            self.ui.write(',\n "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture the diffstat output so it can be JSON-escaped
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1392
1399
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full node in debug mode, 12-digit short form otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        # an explicit template overrides the map file's 'changeset' entry
        if tmpl:
            self.t.cache['changeset'] = tmpl

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes in tmplmodes win over earlier ones
        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        # append the document footer before the base class flushes it
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        try:
            # write header
            if self._parts['header']:
                h = templater.stringify(self.t(self._parts['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = self._parts['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if self._parts['footer']:
                if not self.footer:
                    self.footer = templater.stringify(
                        self.t(self._parts['footer'], **props))
        except KeyError as inst:
            # unknown template keyword: report which map file is at fault
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError as inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1477
1484
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (tmpl, mapfile) pair; normally at most one element is set:
    either literal template text or the path of a style map file.
    """
    # explicit arguments win; otherwise consult the ui configuration,
    # where a logtemplate is stronger than a style
    if not tmpl and not style:
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.unquotestring(tmpl)
            except SyntaxError:
                pass
            return tmpl, None
        style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        # a bare style name refers to a shipped map-cmdline.* file
        if not os.path.split(mapfile)[0]:
            found = (templater.templatepath('map-cmdline.' + mapfile)
                     or templater.templatepath(mapfile))
            if found:
                mapfile = found
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)
1508
1515
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either the unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # a match function is only needed when a patch/diffstat will be shown
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    # 'json' is a built-in pseudo-template with its own printer
    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        return changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                   buffered)
    except SyntaxError as inst:
        raise util.Abort(inst.args[0])
1539
1546
def showmarker(ui, marker):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    # precursor node, then every successor node, space separated
    ui.write(hex(marker.precnode()))
    for succ in marker.succnodes():
        ui.write(' ')
        ui.write(hex(succ))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    # parents may legitimately be unrecorded (None) on old markers
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # the date is shown above already, so drop it from the metadata dump
    meta = sorted(marker.metadata().items())
    ui.write('{%s}' % (', '.join('%r: %r' % item for item in meta
                                 if item[0] != 'date')))
    ui.write('\n')
1557
1564
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    datematch = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    found = {}

    # Record the commit date of every changeset matching the spec;
    # the walk below then revisits revisions and reports the first
    # (tipmost) hit.
    def prep(ctx, fns):
        when = ctx.date()
        if datematch(when[0]):
            found[ctx.rev()] = when

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in found:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(found[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1578
1585
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double until reaching *sizelimit*.

    Produces windowsize, 2*windowsize, 4*windowsize, ... and once the
    value reaches sizelimit keeps yielding that cap forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size = size * 2
1584
1591
class FileWalkError(Exception):
    """Raised when a file history cannot be walked via filelogs alone.

    Callers catch this to fall back to the slow changelog-scanning path.
    """
1587
1594
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Fills *fncache* (rev -> list of filenames) as a side effect.
    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevgen(filelog, last):
        """Yield (linkrev, parentlinkrevs, copied) tuples, newest first.

        Only files, no patterns: examines filelog entries whose linkrev
        falls within the minrev..maxrev range.
        """
        cl_count = len(repo)
        entries = []
        for frev in xrange(0, last + 1):
            linkrev = filelog.linkrev(frev)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for prev in filelog.parentrevs(frev):
                if prev != nullrev:
                    parentlinkrevs.append(filelog.linkrev(prev))
            fnode = filelog.node(frev)
            entries.append((linkrev, parentlinkrevs,
                            follow and filelog.renamed(fnode)))

        return reversed(entries)

    def iterfiles():
        # Yield (filename, filenode-or-None) pairs for every matched
        # file, then for every copy source discovered along the way.
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1684
1691
class _followfilter(object):
    """Incrementally decide whether revisions belong to a --follow walk.

    match() must be fed revisions moving monotonically away from the
    first revision it sees (all later, or all earlier); it grows the
    set of known roots as it goes.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev  # fixed by the first match() call
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            parentrevs = self.repo.changelog.parentrevs(rev)
            if self.onlyfirst:
                # Only the first parent matters with --follow-first.
                return parentrevs[0:1]
            return [p for p in parentrevs if p != nullrev]

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1722
1729
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns.  Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                if value not in self.revs:
                    return False
                self.revs.discard(value)
                ctx = change(value)
                matches = [f for f in ctx.files() if match(f)]
                if matches:
                    fncache[value] = matches
                    self.set.add(value)
                    return True
                return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for prunerev in xrange(rev, stop - 1, -1):
            if ff.match(prunerev):
                # NOTE(review): relies on 'wanted' supporting set-minus
                # with a list (smartset API) — confirm prune is rejected
                # or harmless on the lazywantedset slow path.
                wanted = wanted - [prunerev]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        revit = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            curwindow = []
            for _unused in xrange(windowsize):
                rev = next(revit, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    curwindow.append(rev)
            # Gather data forwards (ascending) ...
            for rev in sorted(curwindow):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ... then yield in the requested order.
            for rev in curwindow:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1860
1867
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    populated = [False]  # one-element list so the closure can flip it
    pctx = repo['.']

    def _populate():
        # Map every linkrev along each followed file's ancestry to the
        # path name(s) the file carried there.
        for fname in files:
            fctx = pctx[fname]
            for group in ((fctx,), fctx.ancestors(followfirst=followfirst)):
                for ancestor in group:
                    fcache.setdefault(ancestor.linkrev(),
                                      set()).add(ancestor.path())

    def filematcher(rev):
        if not populated[0]:
            # Lazy initialization: only walk ancestry if a diff/stat is
            # actually requested for some revision.
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
1887
1894
1888 def _makenofollowlogfilematcher(repo, pats, opts):
1895 def _makenofollowlogfilematcher(repo, pats, opts):
1889 '''hook for extensions to override the filematcher for non-follow cases'''
1896 '''hook for extensions to override the filematcher for non-follow cases'''
1890 return None
1897 return None
1891
1898
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Maps a log option to (revset template, join operator for lists).
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    opts = dict(opts)  # private copy: synthetic '_'-keys are added below
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate the accumulated options into one revset expression.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        elif not isinstance(val, list):
            expr.append(revop % {'val': val})
        else:
            term = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(term)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2043
2050
2044 def _logrevs(repo, opts):
2051 def _logrevs(repo, opts):
2045 # Default --rev value depends on --follow but --follow behavior
2052 # Default --rev value depends on --follow but --follow behavior
2046 # depends on revisions resolved from --rev...
2053 # depends on revisions resolved from --rev...
2047 follow = opts.get('follow') or opts.get('follow_first')
2054 follow = opts.get('follow') or opts.get('follow_first')
2048 if opts.get('rev'):
2055 if opts.get('rev'):
2049 revs = scmutil.revrange(repo, opts['rev'])
2056 revs = scmutil.revrange(repo, opts['rev'])
2050 elif follow and repo.dirstate.p1() == nullid:
2057 elif follow and repo.dirstate.p1() == nullid:
2051 revs = revset.baseset()
2058 revs = revset.baseset()
2052 elif follow:
2059 elif follow:
2053 revs = repo.revs('reverse(:.)')
2060 revs = repo.revs('reverse(:.)')
2054 else:
2061 else:
2055 revs = revset.spanset(repo)
2062 revs = revset.spanset(repo)
2056 revs.reverse()
2063 revs.reverse()
2057 return revs
2064 return revs
2058
2065
2059 def getgraphlogrevs(repo, pats, opts):
2066 def getgraphlogrevs(repo, pats, opts):
2060 """Return (revs, expr, filematcher) where revs is an iterable of
2067 """Return (revs, expr, filematcher) where revs is an iterable of
2061 revision numbers, expr is a revset string built from log options
2068 revision numbers, expr is a revset string built from log options
2062 and file patterns or None, and used to filter 'revs'. If --stat or
2069 and file patterns or None, and used to filter 'revs'. If --stat or
2063 --patch are not passed filematcher is None. Otherwise it is a
2070 --patch are not passed filematcher is None. Otherwise it is a
2064 callable taking a revision number and returning a match objects
2071 callable taking a revision number and returning a match objects
2065 filtering the files to be detailed when displaying the revision.
2072 filtering the files to be detailed when displaying the revision.
2066 """
2073 """
2067 limit = loglimit(opts)
2074 limit = loglimit(opts)
2068 revs = _logrevs(repo, opts)
2075 revs = _logrevs(repo, opts)
2069 if not revs:
2076 if not revs:
2070 return revset.baseset(), None, None
2077 return revset.baseset(), None, None
2071 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2078 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2072 if opts.get('rev'):
2079 if opts.get('rev'):
2073 # User-specified revs might be unsorted, but don't sort before
2080 # User-specified revs might be unsorted, but don't sort before
2074 # _makelogrevset because it might depend on the order of revs
2081 # _makelogrevset because it might depend on the order of revs
2075 revs.sort(reverse=True)
2082 revs.sort(reverse=True)
2076 if expr:
2083 if expr:
2077 # Revset matchers often operate faster on revisions in changelog
2084 # Revset matchers often operate faster on revisions in changelog
2078 # order, because most filters deal with the changelog.
2085 # order, because most filters deal with the changelog.
2079 revs.reverse()
2086 revs.reverse()
2080 matcher = revset.match(repo.ui, expr)
2087 matcher = revset.match(repo.ui, expr)
2081 # Revset matches can reorder revisions. "A or B" typically returns
2088 # Revset matches can reorder revisions. "A or B" typically returns
2082 # returns the revision matching A then the revision matching B. Sort
2089 # returns the revision matching A then the revision matching B. Sort
2083 # again to fix that.
2090 # again to fix that.
2084 revs = matcher(repo, revs)
2091 revs = matcher(repo, revs)
2085 revs.sort(reverse=True)
2092 revs.sort(reverse=True)
2086 if limit is not None:
2093 if limit is not None:
2087 limitedrevs = []
2094 limitedrevs = []
2088 for idx, rev in enumerate(revs):
2095 for idx, rev in enumerate(revs):
2089 if idx >= limit:
2096 if idx >= limit:
2090 break
2097 break
2091 limitedrevs.append(rev)
2098 limitedrevs.append(rev)
2092 revs = revset.baseset(limitedrevs)
2099 revs = revset.baseset(limitedrevs)
2093
2100
2094 return revs, expr, filematcher
2101 return revs, expr, filematcher
2095
2102
2096 def getlogrevs(repo, pats, opts):
2103 def getlogrevs(repo, pats, opts):
2097 """Return (revs, expr, filematcher) where revs is an iterable of
2104 """Return (revs, expr, filematcher) where revs is an iterable of
2098 revision numbers, expr is a revset string built from log options
2105 revision numbers, expr is a revset string built from log options
2099 and file patterns or None, and used to filter 'revs'. If --stat or
2106 and file patterns or None, and used to filter 'revs'. If --stat or
2100 --patch are not passed filematcher is None. Otherwise it is a
2107 --patch are not passed filematcher is None. Otherwise it is a
2101 callable taking a revision number and returning a match objects
2108 callable taking a revision number and returning a match objects
2102 filtering the files to be detailed when displaying the revision.
2109 filtering the files to be detailed when displaying the revision.
2103 """
2110 """
2104 limit = loglimit(opts)
2111 limit = loglimit(opts)
2105 revs = _logrevs(repo, opts)
2112 revs = _logrevs(repo, opts)
2106 if not revs:
2113 if not revs:
2107 return revset.baseset([]), None, None
2114 return revset.baseset([]), None, None
2108 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2115 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2109 if expr:
2116 if expr:
2110 # Revset matchers often operate faster on revisions in changelog
2117 # Revset matchers often operate faster on revisions in changelog
2111 # order, because most filters deal with the changelog.
2118 # order, because most filters deal with the changelog.
2112 if not opts.get('rev'):
2119 if not opts.get('rev'):
2113 revs.reverse()
2120 revs.reverse()
2114 matcher = revset.match(repo.ui, expr)
2121 matcher = revset.match(repo.ui, expr)
2115 # Revset matches can reorder revisions. "A or B" typically returns
2122 # Revset matches can reorder revisions. "A or B" typically returns
2116 # returns the revision matching A then the revision matching B. Sort
2123 # returns the revision matching A then the revision matching B. Sort
2117 # again to fix that.
2124 # again to fix that.
2118 revs = matcher(repo, revs)
2125 revs = matcher(repo, revs)
2119 if not opts.get('rev'):
2126 if not opts.get('rev'):
2120 revs.sort(reverse=True)
2127 revs.sort(reverse=True)
2121 if limit is not None:
2128 if limit is not None:
2122 limitedrevs = []
2129 limitedrevs = []
2123 for idx, r in enumerate(revs):
2130 for idx, r in enumerate(revs):
2124 if limit <= idx:
2131 if limit <= idx:
2125 break
2132 break
2126 limitedrevs.append(r)
2133 limitedrevs.append(r)
2127 revs = revset.baseset(limitedrevs)
2134 revs = revset.baseset(limitedrevs)
2128
2135
2129 return revs, expr, filematcher
2136 return revs, expr, filematcher
2130
2137
2131 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
2138 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
2132 filematcher=None):
2139 filematcher=None):
2133 seen, state = [], graphmod.asciistate()
2140 seen, state = [], graphmod.asciistate()
2134 for rev, type, ctx, parents in dag:
2141 for rev, type, ctx, parents in dag:
2135 char = 'o'
2142 char = 'o'
2136 if ctx.node() in showparents:
2143 if ctx.node() in showparents:
2137 char = '@'
2144 char = '@'
2138 elif ctx.obsolete():
2145 elif ctx.obsolete():
2139 char = 'x'
2146 char = 'x'
2140 elif ctx.closesbranch():
2147 elif ctx.closesbranch():
2141 char = '_'
2148 char = '_'
2142 copies = None
2149 copies = None
2143 if getrenamed and ctx.rev():
2150 if getrenamed and ctx.rev():
2144 copies = []
2151 copies = []
2145 for fn in ctx.files():
2152 for fn in ctx.files():
2146 rename = getrenamed(fn, ctx.rev())
2153 rename = getrenamed(fn, ctx.rev())
2147 if rename:
2154 if rename:
2148 copies.append((fn, rename[0]))
2155 copies.append((fn, rename[0]))
2149 revmatchfn = None
2156 revmatchfn = None
2150 if filematcher is not None:
2157 if filematcher is not None:
2151 revmatchfn = filematcher(ctx.rev())
2158 revmatchfn = filematcher(ctx.rev())
2152 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2159 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2153 lines = displayer.hunk.pop(rev).split('\n')
2160 lines = displayer.hunk.pop(rev).split('\n')
2154 if not lines[-1]:
2161 if not lines[-1]:
2155 del lines[-1]
2162 del lines[-1]
2156 displayer.flush(ctx)
2163 displayer.flush(ctx)
2157 edges = edgefn(type, char, lines, seen, rev, parents)
2164 edges = edgefn(type, char, lines, seen, rev, parents)
2158 for type, char, lines, coldata in edges:
2165 for type, char, lines, coldata in edges:
2159 graphmod.ascii(ui, state, type, char, lines, coldata)
2166 graphmod.ascii(ui, state, type, char, lines, coldata)
2160 displayer.close()
2167 displayer.close()
2161
2168
2162 def graphlog(ui, repo, *pats, **opts):
2169 def graphlog(ui, repo, *pats, **opts):
2163 # Parameters are identical to log command ones
2170 # Parameters are identical to log command ones
2164 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2171 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2165 revdag = graphmod.dagwalker(repo, revs)
2172 revdag = graphmod.dagwalker(repo, revs)
2166
2173
2167 getrenamed = None
2174 getrenamed = None
2168 if opts.get('copies'):
2175 if opts.get('copies'):
2169 endrev = None
2176 endrev = None
2170 if opts.get('rev'):
2177 if opts.get('rev'):
2171 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2178 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2172 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2179 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2173 displayer = show_changeset(ui, repo, opts, buffered=True)
2180 displayer = show_changeset(ui, repo, opts, buffered=True)
2174 showparents = [ctx.node() for ctx in repo[None].parents()]
2181 showparents = [ctx.node() for ctx in repo[None].parents()]
2175 displaygraph(ui, revdag, displayer, showparents,
2182 displaygraph(ui, revdag, displayer, showparents,
2176 graphmod.asciiedges, getrenamed, filematcher)
2183 graphmod.asciiedges, getrenamed, filematcher)
2177
2184
2178 def checkunsupportedgraphflags(pats, opts):
2185 def checkunsupportedgraphflags(pats, opts):
2179 for op in ["newest_first"]:
2186 for op in ["newest_first"]:
2180 if op in opts and opts[op]:
2187 if op in opts and opts[op]:
2181 raise util.Abort(_("-G/--graph option is incompatible with --%s")
2188 raise util.Abort(_("-G/--graph option is incompatible with --%s")
2182 % op.replace("_", "-"))
2189 % op.replace("_", "-"))
2183
2190
2184 def graphrevs(repo, nodes, opts):
2191 def graphrevs(repo, nodes, opts):
2185 limit = loglimit(opts)
2192 limit = loglimit(opts)
2186 nodes.reverse()
2193 nodes.reverse()
2187 if limit is not None:
2194 if limit is not None:
2188 nodes = nodes[:limit]
2195 nodes = nodes[:limit]
2189 return graphmod.nodes(repo, nodes)
2196 return graphmod.nodes(repo, nodes)
2190
2197
2191 def add(ui, repo, match, prefix, explicitonly, **opts):
2198 def add(ui, repo, match, prefix, explicitonly, **opts):
2192 join = lambda f: os.path.join(prefix, f)
2199 join = lambda f: os.path.join(prefix, f)
2193 bad = []
2200 bad = []
2194
2201
2195 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2202 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2196 names = []
2203 names = []
2197 wctx = repo[None]
2204 wctx = repo[None]
2198 cca = None
2205 cca = None
2199 abort, warn = scmutil.checkportabilityalert(ui)
2206 abort, warn = scmutil.checkportabilityalert(ui)
2200 if abort or warn:
2207 if abort or warn:
2201 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2208 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2202
2209
2203 badmatch = matchmod.badmatch(match, badfn)
2210 badmatch = matchmod.badmatch(match, badfn)
2204 dirstate = repo.dirstate
2211 dirstate = repo.dirstate
2205 # We don't want to just call wctx.walk here, since it would return a lot of
2212 # We don't want to just call wctx.walk here, since it would return a lot of
2206 # clean files, which we aren't interested in and takes time.
2213 # clean files, which we aren't interested in and takes time.
2207 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2214 for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
2208 True, False, full=False)):
2215 True, False, full=False)):
2209 exact = match.exact(f)
2216 exact = match.exact(f)
2210 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2217 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2211 if cca:
2218 if cca:
2212 cca(f)
2219 cca(f)
2213 names.append(f)
2220 names.append(f)
2214 if ui.verbose or not exact:
2221 if ui.verbose or not exact:
2215 ui.status(_('adding %s\n') % match.rel(f))
2222 ui.status(_('adding %s\n') % match.rel(f))
2216
2223
2217 for subpath in sorted(wctx.substate):
2224 for subpath in sorted(wctx.substate):
2218 sub = wctx.sub(subpath)
2225 sub = wctx.sub(subpath)
2219 try:
2226 try:
2220 submatch = matchmod.narrowmatcher(subpath, match)
2227 submatch = matchmod.narrowmatcher(subpath, match)
2221 if opts.get('subrepos'):
2228 if opts.get('subrepos'):
2222 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2229 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2223 else:
2230 else:
2224 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2231 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2225 except error.LookupError:
2232 except error.LookupError:
2226 ui.status(_("skipping missing subrepository: %s\n")
2233 ui.status(_("skipping missing subrepository: %s\n")
2227 % join(subpath))
2234 % join(subpath))
2228
2235
2229 if not opts.get('dry_run'):
2236 if not opts.get('dry_run'):
2230 rejected = wctx.add(names, prefix)
2237 rejected = wctx.add(names, prefix)
2231 bad.extend(f for f in rejected if f in match.files())
2238 bad.extend(f for f in rejected if f in match.files())
2232 return bad
2239 return bad
2233
2240
2234 def forget(ui, repo, match, prefix, explicitonly):
2241 def forget(ui, repo, match, prefix, explicitonly):
2235 join = lambda f: os.path.join(prefix, f)
2242 join = lambda f: os.path.join(prefix, f)
2236 bad = []
2243 bad = []
2237 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2244 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2238 wctx = repo[None]
2245 wctx = repo[None]
2239 forgot = []
2246 forgot = []
2240
2247
2241 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2248 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2242 forget = sorted(s[0] + s[1] + s[3] + s[6])
2249 forget = sorted(s[0] + s[1] + s[3] + s[6])
2243 if explicitonly:
2250 if explicitonly:
2244 forget = [f for f in forget if match.exact(f)]
2251 forget = [f for f in forget if match.exact(f)]
2245
2252
2246 for subpath in sorted(wctx.substate):
2253 for subpath in sorted(wctx.substate):
2247 sub = wctx.sub(subpath)
2254 sub = wctx.sub(subpath)
2248 try:
2255 try:
2249 submatch = matchmod.narrowmatcher(subpath, match)
2256 submatch = matchmod.narrowmatcher(subpath, match)
2250 subbad, subforgot = sub.forget(submatch, prefix)
2257 subbad, subforgot = sub.forget(submatch, prefix)
2251 bad.extend([subpath + '/' + f for f in subbad])
2258 bad.extend([subpath + '/' + f for f in subbad])
2252 forgot.extend([subpath + '/' + f for f in subforgot])
2259 forgot.extend([subpath + '/' + f for f in subforgot])
2253 except error.LookupError:
2260 except error.LookupError:
2254 ui.status(_("skipping missing subrepository: %s\n")
2261 ui.status(_("skipping missing subrepository: %s\n")
2255 % join(subpath))
2262 % join(subpath))
2256
2263
2257 if not explicitonly:
2264 if not explicitonly:
2258 for f in match.files():
2265 for f in match.files():
2259 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2266 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2260 if f not in forgot:
2267 if f not in forgot:
2261 if repo.wvfs.exists(f):
2268 if repo.wvfs.exists(f):
2262 # Don't complain if the exact case match wasn't given.
2269 # Don't complain if the exact case match wasn't given.
2263 # But don't do this until after checking 'forgot', so
2270 # But don't do this until after checking 'forgot', so
2264 # that subrepo files aren't normalized, and this op is
2271 # that subrepo files aren't normalized, and this op is
2265 # purely from data cached by the status walk above.
2272 # purely from data cached by the status walk above.
2266 if repo.dirstate.normalize(f) in repo.dirstate:
2273 if repo.dirstate.normalize(f) in repo.dirstate:
2267 continue
2274 continue
2268 ui.warn(_('not removing %s: '
2275 ui.warn(_('not removing %s: '
2269 'file is already untracked\n')
2276 'file is already untracked\n')
2270 % match.rel(f))
2277 % match.rel(f))
2271 bad.append(f)
2278 bad.append(f)
2272
2279
2273 for f in forget:
2280 for f in forget:
2274 if ui.verbose or not match.exact(f):
2281 if ui.verbose or not match.exact(f):
2275 ui.status(_('removing %s\n') % match.rel(f))
2282 ui.status(_('removing %s\n') % match.rel(f))
2276
2283
2277 rejected = wctx.forget(forget, prefix)
2284 rejected = wctx.forget(forget, prefix)
2278 bad.extend(f for f in rejected if f in match.files())
2285 bad.extend(f for f in rejected if f in match.files())
2279 forgot.extend(f for f in forget if f not in rejected)
2286 forgot.extend(f for f in forget if f not in rejected)
2280 return bad, forgot
2287 return bad, forgot
2281
2288
2282 def files(ui, ctx, m, fm, fmt, subrepos):
2289 def files(ui, ctx, m, fm, fmt, subrepos):
2283 rev = ctx.rev()
2290 rev = ctx.rev()
2284 ret = 1
2291 ret = 1
2285 ds = ctx.repo().dirstate
2292 ds = ctx.repo().dirstate
2286
2293
2287 for f in ctx.matches(m):
2294 for f in ctx.matches(m):
2288 if rev is None and ds[f] == 'r':
2295 if rev is None and ds[f] == 'r':
2289 continue
2296 continue
2290 fm.startitem()
2297 fm.startitem()
2291 if ui.verbose:
2298 if ui.verbose:
2292 fc = ctx[f]
2299 fc = ctx[f]
2293 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2300 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2294 fm.data(abspath=f)
2301 fm.data(abspath=f)
2295 fm.write('path', fmt, m.rel(f))
2302 fm.write('path', fmt, m.rel(f))
2296 ret = 0
2303 ret = 0
2297
2304
2298 for subpath in sorted(ctx.substate):
2305 for subpath in sorted(ctx.substate):
2299 def matchessubrepo(subpath):
2306 def matchessubrepo(subpath):
2300 return (m.always() or m.exact(subpath)
2307 return (m.always() or m.exact(subpath)
2301 or any(f.startswith(subpath + '/') for f in m.files()))
2308 or any(f.startswith(subpath + '/') for f in m.files()))
2302
2309
2303 if subrepos or matchessubrepo(subpath):
2310 if subrepos or matchessubrepo(subpath):
2304 sub = ctx.sub(subpath)
2311 sub = ctx.sub(subpath)
2305 try:
2312 try:
2306 submatch = matchmod.narrowmatcher(subpath, m)
2313 submatch = matchmod.narrowmatcher(subpath, m)
2307 if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
2314 if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
2308 ret = 0
2315 ret = 0
2309 except error.LookupError:
2316 except error.LookupError:
2310 ui.status(_("skipping missing subrepository: %s\n")
2317 ui.status(_("skipping missing subrepository: %s\n")
2311 % m.abs(subpath))
2318 % m.abs(subpath))
2312
2319
2313 return ret
2320 return ret
2314
2321
2315 def remove(ui, repo, m, prefix, after, force, subrepos):
2322 def remove(ui, repo, m, prefix, after, force, subrepos):
2316 join = lambda f: os.path.join(prefix, f)
2323 join = lambda f: os.path.join(prefix, f)
2317 ret = 0
2324 ret = 0
2318 s = repo.status(match=m, clean=True)
2325 s = repo.status(match=m, clean=True)
2319 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2326 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2320
2327
2321 wctx = repo[None]
2328 wctx = repo[None]
2322
2329
2323 for subpath in sorted(wctx.substate):
2330 for subpath in sorted(wctx.substate):
2324 def matchessubrepo(matcher, subpath):
2331 def matchessubrepo(matcher, subpath):
2325 if matcher.exact(subpath):
2332 if matcher.exact(subpath):
2326 return True
2333 return True
2327 for f in matcher.files():
2334 for f in matcher.files():
2328 if f.startswith(subpath):
2335 if f.startswith(subpath):
2329 return True
2336 return True
2330 return False
2337 return False
2331
2338
2332 if subrepos or matchessubrepo(m, subpath):
2339 if subrepos or matchessubrepo(m, subpath):
2333 sub = wctx.sub(subpath)
2340 sub = wctx.sub(subpath)
2334 try:
2341 try:
2335 submatch = matchmod.narrowmatcher(subpath, m)
2342 submatch = matchmod.narrowmatcher(subpath, m)
2336 if sub.removefiles(submatch, prefix, after, force, subrepos):
2343 if sub.removefiles(submatch, prefix, after, force, subrepos):
2337 ret = 1
2344 ret = 1
2338 except error.LookupError:
2345 except error.LookupError:
2339 ui.status(_("skipping missing subrepository: %s\n")
2346 ui.status(_("skipping missing subrepository: %s\n")
2340 % join(subpath))
2347 % join(subpath))
2341
2348
2342 # warn about failure to delete explicit files/dirs
2349 # warn about failure to delete explicit files/dirs
2343 deleteddirs = util.dirs(deleted)
2350 deleteddirs = util.dirs(deleted)
2344 for f in m.files():
2351 for f in m.files():
2345 def insubrepo():
2352 def insubrepo():
2346 for subpath in wctx.substate:
2353 for subpath in wctx.substate:
2347 if f.startswith(subpath):
2354 if f.startswith(subpath):
2348 return True
2355 return True
2349 return False
2356 return False
2350
2357
2351 isdir = f in deleteddirs or wctx.hasdir(f)
2358 isdir = f in deleteddirs or wctx.hasdir(f)
2352 if f in repo.dirstate or isdir or f == '.' or insubrepo():
2359 if f in repo.dirstate or isdir or f == '.' or insubrepo():
2353 continue
2360 continue
2354
2361
2355 if repo.wvfs.exists(f):
2362 if repo.wvfs.exists(f):
2356 if repo.wvfs.isdir(f):
2363 if repo.wvfs.isdir(f):
2357 ui.warn(_('not removing %s: no tracked files\n')
2364 ui.warn(_('not removing %s: no tracked files\n')
2358 % m.rel(f))
2365 % m.rel(f))
2359 else:
2366 else:
2360 ui.warn(_('not removing %s: file is untracked\n')
2367 ui.warn(_('not removing %s: file is untracked\n')
2361 % m.rel(f))
2368 % m.rel(f))
2362 # missing files will generate a warning elsewhere
2369 # missing files will generate a warning elsewhere
2363 ret = 1
2370 ret = 1
2364
2371
2365 if force:
2372 if force:
2366 list = modified + deleted + clean + added
2373 list = modified + deleted + clean + added
2367 elif after:
2374 elif after:
2368 list = deleted
2375 list = deleted
2369 for f in modified + added + clean:
2376 for f in modified + added + clean:
2370 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
2377 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
2371 ret = 1
2378 ret = 1
2372 else:
2379 else:
2373 list = deleted + clean
2380 list = deleted + clean
2374 for f in modified:
2381 for f in modified:
2375 ui.warn(_('not removing %s: file is modified (use -f'
2382 ui.warn(_('not removing %s: file is modified (use -f'
2376 ' to force removal)\n') % m.rel(f))
2383 ' to force removal)\n') % m.rel(f))
2377 ret = 1
2384 ret = 1
2378 for f in added:
2385 for f in added:
2379 ui.warn(_('not removing %s: file has been marked for add'
2386 ui.warn(_('not removing %s: file has been marked for add'
2380 ' (use forget to undo)\n') % m.rel(f))
2387 ' (use forget to undo)\n') % m.rel(f))
2381 ret = 1
2388 ret = 1
2382
2389
2383 for f in sorted(list):
2390 for f in sorted(list):
2384 if ui.verbose or not m.exact(f):
2391 if ui.verbose or not m.exact(f):
2385 ui.status(_('removing %s\n') % m.rel(f))
2392 ui.status(_('removing %s\n') % m.rel(f))
2386
2393
2387 wlock = repo.wlock()
2394 wlock = repo.wlock()
2388 try:
2395 try:
2389 if not after:
2396 if not after:
2390 for f in list:
2397 for f in list:
2391 if f in added:
2398 if f in added:
2392 continue # we never unlink added files on remove
2399 continue # we never unlink added files on remove
2393 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2400 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2394 repo[None].forget(list)
2401 repo[None].forget(list)
2395 finally:
2402 finally:
2396 wlock.release()
2403 wlock.release()
2397
2404
2398 return ret
2405 return ret
2399
2406
2400 def cat(ui, repo, ctx, matcher, prefix, **opts):
2407 def cat(ui, repo, ctx, matcher, prefix, **opts):
2401 err = 1
2408 err = 1
2402
2409
2403 def write(path):
2410 def write(path):
2404 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2411 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2405 pathname=os.path.join(prefix, path))
2412 pathname=os.path.join(prefix, path))
2406 data = ctx[path].data()
2413 data = ctx[path].data()
2407 if opts.get('decode'):
2414 if opts.get('decode'):
2408 data = repo.wwritedata(path, data)
2415 data = repo.wwritedata(path, data)
2409 fp.write(data)
2416 fp.write(data)
2410 fp.close()
2417 fp.close()
2411
2418
2412 # Automation often uses hg cat on single files, so special case it
2419 # Automation often uses hg cat on single files, so special case it
2413 # for performance to avoid the cost of parsing the manifest.
2420 # for performance to avoid the cost of parsing the manifest.
2414 if len(matcher.files()) == 1 and not matcher.anypats():
2421 if len(matcher.files()) == 1 and not matcher.anypats():
2415 file = matcher.files()[0]
2422 file = matcher.files()[0]
2416 mf = repo.manifest
2423 mf = repo.manifest
2417 mfnode = ctx.manifestnode()
2424 mfnode = ctx.manifestnode()
2418 if mfnode and mf.find(mfnode, file)[0]:
2425 if mfnode and mf.find(mfnode, file)[0]:
2419 write(file)
2426 write(file)
2420 return 0
2427 return 0
2421
2428
2422 # Don't warn about "missing" files that are really in subrepos
2429 # Don't warn about "missing" files that are really in subrepos
2423 def badfn(path, msg):
2430 def badfn(path, msg):
2424 for subpath in ctx.substate:
2431 for subpath in ctx.substate:
2425 if path.startswith(subpath):
2432 if path.startswith(subpath):
2426 return
2433 return
2427 matcher.bad(path, msg)
2434 matcher.bad(path, msg)
2428
2435
2429 for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
2436 for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
2430 write(abs)
2437 write(abs)
2431 err = 0
2438 err = 0
2432
2439
2433 for subpath in sorted(ctx.substate):
2440 for subpath in sorted(ctx.substate):
2434 sub = ctx.sub(subpath)
2441 sub = ctx.sub(subpath)
2435 try:
2442 try:
2436 submatch = matchmod.narrowmatcher(subpath, matcher)
2443 submatch = matchmod.narrowmatcher(subpath, matcher)
2437
2444
2438 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2445 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2439 **opts):
2446 **opts):
2440 err = 0
2447 err = 0
2441 except error.RepoLookupError:
2448 except error.RepoLookupError:
2442 ui.status(_("skipping missing subrepository: %s\n")
2449 ui.status(_("skipping missing subrepository: %s\n")
2443 % os.path.join(prefix, subpath))
2450 % os.path.join(prefix, subpath))
2444
2451
2445 return err
2452 return err
2446
2453
2447 def commit(ui, repo, commitfunc, pats, opts):
2454 def commit(ui, repo, commitfunc, pats, opts):
2448 '''commit the specified files or all outstanding changes'''
2455 '''commit the specified files or all outstanding changes'''
2449 date = opts.get('date')
2456 date = opts.get('date')
2450 if date:
2457 if date:
2451 opts['date'] = util.parsedate(date)
2458 opts['date'] = util.parsedate(date)
2452 message = logmessage(ui, opts)
2459 message = logmessage(ui, opts)
2453 matcher = scmutil.match(repo[None], pats, opts)
2460 matcher = scmutil.match(repo[None], pats, opts)
2454
2461
2455 # extract addremove carefully -- this function can be called from a command
2462 # extract addremove carefully -- this function can be called from a command
2456 # that doesn't support addremove
2463 # that doesn't support addremove
2457 if opts.get('addremove'):
2464 if opts.get('addremove'):
2458 if scmutil.addremove(repo, matcher, "", opts) != 0:
2465 if scmutil.addremove(repo, matcher, "", opts) != 0:
2459 raise util.Abort(
2466 raise util.Abort(
2460 _("failed to mark all new/missing files as added/removed"))
2467 _("failed to mark all new/missing files as added/removed"))
2461
2468
2462 return commitfunc(ui, repo, message, matcher, opts)
2469 return commitfunc(ui, repo, message, matcher, opts)
2463
2470
2464 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2471 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2465 # avoid cycle context -> subrepo -> cmdutil
2472 # avoid cycle context -> subrepo -> cmdutil
2466 import context
2473 import context
2467
2474
2468 # amend will reuse the existing user if not specified, but the obsolete
2475 # amend will reuse the existing user if not specified, but the obsolete
2469 # marker creation requires that the current user's name is specified.
2476 # marker creation requires that the current user's name is specified.
2470 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2477 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2471 ui.username() # raise exception if username not set
2478 ui.username() # raise exception if username not set
2472
2479
2473 ui.note(_('amending changeset %s\n') % old)
2480 ui.note(_('amending changeset %s\n') % old)
2474 base = old.p1()
2481 base = old.p1()
2475 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2482 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2476
2483
2477 wlock = dsguard = lock = newid = None
2484 wlock = dsguard = lock = newid = None
2478 try:
2485 try:
2479 wlock = repo.wlock()
2486 wlock = repo.wlock()
2480 dsguard = dirstateguard(repo, 'amend')
2487 dsguard = dirstateguard(repo, 'amend')
2481 lock = repo.lock()
2488 lock = repo.lock()
2482 tr = repo.transaction('amend')
2489 tr = repo.transaction('amend')
2483 try:
2490 try:
2484 # See if we got a message from -m or -l, if not, open the editor
2491 # See if we got a message from -m or -l, if not, open the editor
2485 # with the message of the changeset to amend
2492 # with the message of the changeset to amend
2486 message = logmessage(ui, opts)
2493 message = logmessage(ui, opts)
2487 # ensure logfile does not conflict with later enforcement of the
2494 # ensure logfile does not conflict with later enforcement of the
2488 # message. potential logfile content has been processed by
2495 # message. potential logfile content has been processed by
2489 # `logmessage` anyway.
2496 # `logmessage` anyway.
2490 opts.pop('logfile')
2497 opts.pop('logfile')
2491 # First, do a regular commit to record all changes in the working
2498 # First, do a regular commit to record all changes in the working
2492 # directory (if there are any)
2499 # directory (if there are any)
2493 ui.callhooks = False
2500 ui.callhooks = False
2494 activebookmark = repo._activebookmark
2501 activebookmark = repo._activebookmark
2495 try:
2502 try:
2496 repo._activebookmark = None
2503 repo._activebookmark = None
2497 opts['message'] = 'temporary amend commit for %s' % old
2504 opts['message'] = 'temporary amend commit for %s' % old
2498 node = commit(ui, repo, commitfunc, pats, opts)
2505 node = commit(ui, repo, commitfunc, pats, opts)
2499 finally:
2506 finally:
2500 repo._activebookmark = activebookmark
2507 repo._activebookmark = activebookmark
2501 ui.callhooks = True
2508 ui.callhooks = True
2502 ctx = repo[node]
2509 ctx = repo[node]
2503
2510
2504 # Participating changesets:
2511 # Participating changesets:
2505 #
2512 #
2506 # node/ctx o - new (intermediate) commit that contains changes
2513 # node/ctx o - new (intermediate) commit that contains changes
2507 # | from working dir to go into amending commit
2514 # | from working dir to go into amending commit
2508 # | (or a workingctx if there were no changes)
2515 # | (or a workingctx if there were no changes)
2509 # |
2516 # |
2510 # old o - changeset to amend
2517 # old o - changeset to amend
2511 # |
2518 # |
2512 # base o - parent of amending changeset
2519 # base o - parent of amending changeset
2513
2520
2514 # Update extra dict from amended commit (e.g. to preserve graft
2521 # Update extra dict from amended commit (e.g. to preserve graft
2515 # source)
2522 # source)
2516 extra.update(old.extra())
2523 extra.update(old.extra())
2517
2524
2518 # Also update it from the intermediate commit or from the wctx
2525 # Also update it from the intermediate commit or from the wctx
2519 extra.update(ctx.extra())
2526 extra.update(ctx.extra())
2520
2527
2521 if len(old.parents()) > 1:
2528 if len(old.parents()) > 1:
2522 # ctx.files() isn't reliable for merges, so fall back to the
2529 # ctx.files() isn't reliable for merges, so fall back to the
2523 # slower repo.status() method
2530 # slower repo.status() method
2524 files = set([fn for st in repo.status(base, old)[:3]
2531 files = set([fn for st in repo.status(base, old)[:3]
2525 for fn in st])
2532 for fn in st])
2526 else:
2533 else:
2527 files = set(old.files())
2534 files = set(old.files())
2528
2535
2529 # Second, we use either the commit we just did, or if there were no
2536 # Second, we use either the commit we just did, or if there were no
2530 # changes the parent of the working directory as the version of the
2537 # changes the parent of the working directory as the version of the
2531 # files in the final amend commit
2538 # files in the final amend commit
2532 if node:
2539 if node:
2533 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2540 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2534
2541
2535 user = ctx.user()
2542 user = ctx.user()
2536 date = ctx.date()
2543 date = ctx.date()
2537 # Recompute copies (avoid recording a -> b -> a)
2544 # Recompute copies (avoid recording a -> b -> a)
2538 copied = copies.pathcopies(base, ctx)
2545 copied = copies.pathcopies(base, ctx)
2539 if old.p2:
2546 if old.p2:
2540 copied.update(copies.pathcopies(old.p2(), ctx))
2547 copied.update(copies.pathcopies(old.p2(), ctx))
2541
2548
2542 # Prune files which were reverted by the updates: if old
2549 # Prune files which were reverted by the updates: if old
2543 # introduced file X and our intermediate commit, node,
2550 # introduced file X and our intermediate commit, node,
2544 # renamed that file, then those two files are the same and
2551 # renamed that file, then those two files are the same and
2545 # we can discard X from our list of files. Likewise if X
2552 # we can discard X from our list of files. Likewise if X
2546 # was deleted, it's no longer relevant
2553 # was deleted, it's no longer relevant
2547 files.update(ctx.files())
2554 files.update(ctx.files())
2548
2555
2549 def samefile(f):
2556 def samefile(f):
2550 if f in ctx.manifest():
2557 if f in ctx.manifest():
2551 a = ctx.filectx(f)
2558 a = ctx.filectx(f)
2552 if f in base.manifest():
2559 if f in base.manifest():
2553 b = base.filectx(f)
2560 b = base.filectx(f)
2554 return (not a.cmp(b)
2561 return (not a.cmp(b)
2555 and a.flags() == b.flags())
2562 and a.flags() == b.flags())
2556 else:
2563 else:
2557 return False
2564 return False
2558 else:
2565 else:
2559 return f not in base.manifest()
2566 return f not in base.manifest()
2560 files = [f for f in files if not samefile(f)]
2567 files = [f for f in files if not samefile(f)]
2561
2568
2562 def filectxfn(repo, ctx_, path):
2569 def filectxfn(repo, ctx_, path):
2563 try:
2570 try:
2564 fctx = ctx[path]
2571 fctx = ctx[path]
2565 flags = fctx.flags()
2572 flags = fctx.flags()
2566 mctx = context.memfilectx(repo,
2573 mctx = context.memfilectx(repo,
2567 fctx.path(), fctx.data(),
2574 fctx.path(), fctx.data(),
2568 islink='l' in flags,
2575 islink='l' in flags,
2569 isexec='x' in flags,
2576 isexec='x' in flags,
2570 copied=copied.get(path))
2577 copied=copied.get(path))
2571 return mctx
2578 return mctx
2572 except KeyError:
2579 except KeyError:
2573 return None
2580 return None
2574 else:
2581 else:
2575 ui.note(_('copying changeset %s to %s\n') % (old, base))
2582 ui.note(_('copying changeset %s to %s\n') % (old, base))
2576
2583
2577 # Use version of files as in the old cset
2584 # Use version of files as in the old cset
2578 def filectxfn(repo, ctx_, path):
2585 def filectxfn(repo, ctx_, path):
2579 try:
2586 try:
2580 return old.filectx(path)
2587 return old.filectx(path)
2581 except KeyError:
2588 except KeyError:
2582 return None
2589 return None
2583
2590
2584 user = opts.get('user') or old.user()
2591 user = opts.get('user') or old.user()
2585 date = opts.get('date') or old.date()
2592 date = opts.get('date') or old.date()
2586 editform = mergeeditform(old, 'commit.amend')
2593 editform = mergeeditform(old, 'commit.amend')
2587 editor = getcommiteditor(editform=editform, **opts)
2594 editor = getcommiteditor(editform=editform, **opts)
2588 if not message:
2595 if not message:
2589 editor = getcommiteditor(edit=True, editform=editform)
2596 editor = getcommiteditor(edit=True, editform=editform)
2590 message = old.description()
2597 message = old.description()
2591
2598
2592 pureextra = extra.copy()
2599 pureextra = extra.copy()
2593 extra['amend_source'] = old.hex()
2600 extra['amend_source'] = old.hex()
2594
2601
2595 new = context.memctx(repo,
2602 new = context.memctx(repo,
2596 parents=[base.node(), old.p2().node()],
2603 parents=[base.node(), old.p2().node()],
2597 text=message,
2604 text=message,
2598 files=files,
2605 files=files,
2599 filectxfn=filectxfn,
2606 filectxfn=filectxfn,
2600 user=user,
2607 user=user,
2601 date=date,
2608 date=date,
2602 extra=extra,
2609 extra=extra,
2603 editor=editor)
2610 editor=editor)
2604
2611
2605 newdesc = changelog.stripdesc(new.description())
2612 newdesc = changelog.stripdesc(new.description())
2606 if ((not node)
2613 if ((not node)
2607 and newdesc == old.description()
2614 and newdesc == old.description()
2608 and user == old.user()
2615 and user == old.user()
2609 and date == old.date()
2616 and date == old.date()
2610 and pureextra == old.extra()):
2617 and pureextra == old.extra()):
2611 # nothing changed. continuing here would create a new node
2618 # nothing changed. continuing here would create a new node
2612 # anyway because of the amend_source noise.
2619 # anyway because of the amend_source noise.
2613 #
2620 #
2614 # This not what we expect from amend.
2621 # This not what we expect from amend.
2615 return old.node()
2622 return old.node()
2616
2623
2617 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2624 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2618 try:
2625 try:
2619 if opts.get('secret'):
2626 if opts.get('secret'):
2620 commitphase = 'secret'
2627 commitphase = 'secret'
2621 else:
2628 else:
2622 commitphase = old.phase()
2629 commitphase = old.phase()
2623 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2630 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2624 newid = repo.commitctx(new)
2631 newid = repo.commitctx(new)
2625 finally:
2632 finally:
2626 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2633 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2627 if newid != old.node():
2634 if newid != old.node():
2628 # Reroute the working copy parent to the new changeset
2635 # Reroute the working copy parent to the new changeset
2629 repo.setparents(newid, nullid)
2636 repo.setparents(newid, nullid)
2630
2637
2631 # Move bookmarks from old parent to amend commit
2638 # Move bookmarks from old parent to amend commit
2632 bms = repo.nodebookmarks(old.node())
2639 bms = repo.nodebookmarks(old.node())
2633 if bms:
2640 if bms:
2634 marks = repo._bookmarks
2641 marks = repo._bookmarks
2635 for bm in bms:
2642 for bm in bms:
2636 ui.debug('moving bookmarks %r from %s to %s\n' %
2643 ui.debug('moving bookmarks %r from %s to %s\n' %
2637 (marks, old.hex(), hex(newid)))
2644 (marks, old.hex(), hex(newid)))
2638 marks[bm] = newid
2645 marks[bm] = newid
2639 marks.recordchange(tr)
2646 marks.recordchange(tr)
2640 #commit the whole amend process
2647 #commit the whole amend process
2641 if createmarkers:
2648 if createmarkers:
2642 # mark the new changeset as successor of the rewritten one
2649 # mark the new changeset as successor of the rewritten one
2643 new = repo[newid]
2650 new = repo[newid]
2644 obs = [(old, (new,))]
2651 obs = [(old, (new,))]
2645 if node:
2652 if node:
2646 obs.append((ctx, ()))
2653 obs.append((ctx, ()))
2647
2654
2648 obsolete.createmarkers(repo, obs)
2655 obsolete.createmarkers(repo, obs)
2649 tr.close()
2656 tr.close()
2650 finally:
2657 finally:
2651 tr.release()
2658 tr.release()
2652 dsguard.close()
2659 dsguard.close()
2653 if not createmarkers and newid != old.node():
2660 if not createmarkers and newid != old.node():
2654 # Strip the intermediate commit (if there was one) and the amended
2661 # Strip the intermediate commit (if there was one) and the amended
2655 # commit
2662 # commit
2656 if node:
2663 if node:
2657 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2664 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2658 ui.note(_('stripping amended changeset %s\n') % old)
2665 ui.note(_('stripping amended changeset %s\n') % old)
2659 repair.strip(ui, repo, old.node(), topic='amend-backup')
2666 repair.strip(ui, repo, old.node(), topic='amend-backup')
2660 finally:
2667 finally:
2661 lockmod.release(lock, dsguard, wlock)
2668 lockmod.release(lock, dsguard, wlock)
2662 return newid
2669 return newid
2663
2670
def commiteditor(repo, ctx, subs, editform=''):
    """Return a commit message for ctx.

    If the changeset already carries a description, use it verbatim;
    otherwise fall back to launching the user's editor via
    commitforceeditor().
    """
    # A non-empty description short-circuits the editor entirely.
    return (ctx.description()
            or commitforceeditor(repo, ctx, subs, editform=editform))
2668
2675
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Launch the user's editor to obtain a commit message for ctx.

    The editor is seeded either from the most specific matching
    [committemplate] configuration (looked up by successively shorter
    prefixes of ``editform``, then 'changeset') or from the plain-text
    skeleton built by buildcommittext().

    ``finishdesc``, if given, post-processes the edited text. Raises
    util.Abort when the resulting message is empty.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # Find the most specific [committemplate] entry for this editform,
    # dropping trailing components until one matches; 'changeset' is the
    # catch-all first element.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root; restore the previous cwd even if
    # the editor invocation raises (e.g. the user aborts the edit) — the
    # original code could leave the process chdir'd into repo.root
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                            editform=editform)
    finally:
        os.chdir(olddir)
    # strip the 'HG:' helper lines that were added for the editor session
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2698
2705
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit-editor skeleton for ctx from template ``tmpl``.

    Returns the rendered text; raises util.Abort on a template syntax
    error.
    """
    ui = repo.ui
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        templater = changeset_templater(ui, repo, None, {}, tmpl, mapfile,
                                        False)
    except SyntaxError as inst:
        raise util.Abort(inst.args[0])

    # make every non-'changeset' [committemplate] entry available as a
    # named sub-template
    templater.t.cache.update((key, value)
                             for key, value
                             in repo.ui.configitems('committemplate')
                             if key != 'changeset')

    extramsg = extramsg or ''  # ensure that extramsg is string

    ui.pushbuffer()
    templater.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2718
2725
def hgprefix(msg):
    """Prefix every non-empty line of ``msg`` with 'HG: '.

    Empty lines are dropped; the result is re-joined with newlines.
    """
    prefixed = []
    for line in msg.split("\n"):
        if line:
            prefixed.append("HG: %s" % line)
    return "\n".join(prefixed)
2721
2728
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the plain-text editor skeleton shown when committing ctx.

    The skeleton contains the existing description (if any) followed by
    'HG:'-prefixed helper lines describing the commit context and the
    files involved.
    """
    lines = []
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    if ctx.description():
        lines.append(ctx.description())
    lines.append("")
    lines.append("")  # Empty line between message and comments.
    lines.append(hgprefix(_("Enter commit message."
                            " Lines beginning with 'HG:' are removed.")))
    lines.append(hgprefix(extramsg))
    lines.append("HG: --")
    lines.append(hgprefix(_("user: %s") % ctx.user()))
    if ctx.p2():
        lines.append(hgprefix(_("branch merge")))
    if ctx.branch():
        lines.append(hgprefix(_("branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        lines.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
    for sub in subs:
        lines.append(hgprefix(_("subrepo %s") % sub))
    for name in added:
        lines.append(hgprefix(_("added %s") % name))
    for name in modified:
        lines.append(hgprefix(_("changed %s") % name))
    for name in removed:
        lines.append(hgprefix(_("removed %s") % name))
    if not (added or modified or removed):
        lines.append(hgprefix(_("no files changed")))
    lines.append("")

    return "\n".join(lines)
2749
2756
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Emit user-facing status messages after committing ``node``.

    Reports 'created new head', 'reopening closed branch head' and, at
    debug/verbose levels, the committed changeset identifier.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # Was some parent a head of this branch before the commit? If so,
    # the head count did not grow and no message is needed.
    parentwashead = any(p.node() in bheads and p.branch() == branch
                        for p in parents) if bheads else False
    if (not opts.get('amend') and bheads and node not in bheads
            and not parentwashead):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for parent in parents:
            # committing on top of a closed head reopens the branch
            if parent.closesbranch() and parent.branch() == branch:
                repo.ui.status(_('reopening closed branch head %d\n')
                               % parent)

    ui = repo.ui
    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2797
2804
2798 def revert(ui, repo, ctx, parents, *pats, **opts):
2805 def revert(ui, repo, ctx, parents, *pats, **opts):
2799 parent, p2 = parents
2806 parent, p2 = parents
2800 node = ctx.node()
2807 node = ctx.node()
2801
2808
2802 mf = ctx.manifest()
2809 mf = ctx.manifest()
2803 if node == p2:
2810 if node == p2:
2804 parent = p2
2811 parent = p2
2805 if node == parent:
2812 if node == parent:
2806 pmf = mf
2813 pmf = mf
2807 else:
2814 else:
2808 pmf = None
2815 pmf = None
2809
2816
2810 # need all matching names in dirstate and manifest of target rev,
2817 # need all matching names in dirstate and manifest of target rev,
2811 # so have to walk both. do not print errors if files exist in one
2818 # so have to walk both. do not print errors if files exist in one
2812 # but not other. in both cases, filesets should be evaluated against
2819 # but not other. in both cases, filesets should be evaluated against
2813 # workingctx to get consistent result (issue4497). this means 'set:**'
2820 # workingctx to get consistent result (issue4497). this means 'set:**'
2814 # cannot be used to select missing files from target rev.
2821 # cannot be used to select missing files from target rev.
2815
2822
2816 # `names` is a mapping for all elements in working copy and target revision
2823 # `names` is a mapping for all elements in working copy and target revision
2817 # The mapping is in the form:
2824 # The mapping is in the form:
2818 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2825 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2819 names = {}
2826 names = {}
2820
2827
2821 wlock = repo.wlock()
2828 wlock = repo.wlock()
2822 try:
2829 try:
2823 ## filling of the `names` mapping
2830 ## filling of the `names` mapping
2824 # walk dirstate to fill `names`
2831 # walk dirstate to fill `names`
2825
2832
2826 interactive = opts.get('interactive', False)
2833 interactive = opts.get('interactive', False)
2827 wctx = repo[None]
2834 wctx = repo[None]
2828 m = scmutil.match(wctx, pats, opts)
2835 m = scmutil.match(wctx, pats, opts)
2829
2836
2830 # we'll need this later
2837 # we'll need this later
2831 targetsubs = sorted(s for s in wctx.substate if m(s))
2838 targetsubs = sorted(s for s in wctx.substate if m(s))
2832
2839
2833 if not m.always():
2840 if not m.always():
2834 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2841 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2835 names[abs] = m.rel(abs), m.exact(abs)
2842 names[abs] = m.rel(abs), m.exact(abs)
2836
2843
2837 # walk target manifest to fill `names`
2844 # walk target manifest to fill `names`
2838
2845
2839 def badfn(path, msg):
2846 def badfn(path, msg):
2840 if path in names:
2847 if path in names:
2841 return
2848 return
2842 if path in ctx.substate:
2849 if path in ctx.substate:
2843 return
2850 return
2844 path_ = path + '/'
2851 path_ = path + '/'
2845 for f in names:
2852 for f in names:
2846 if f.startswith(path_):
2853 if f.startswith(path_):
2847 return
2854 return
2848 ui.warn("%s: %s\n" % (m.rel(path), msg))
2855 ui.warn("%s: %s\n" % (m.rel(path), msg))
2849
2856
2850 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2857 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2851 if abs not in names:
2858 if abs not in names:
2852 names[abs] = m.rel(abs), m.exact(abs)
2859 names[abs] = m.rel(abs), m.exact(abs)
2853
2860
2854 # Find status of all file in `names`.
2861 # Find status of all file in `names`.
2855 m = scmutil.matchfiles(repo, names)
2862 m = scmutil.matchfiles(repo, names)
2856
2863
2857 changes = repo.status(node1=node, match=m,
2864 changes = repo.status(node1=node, match=m,
2858 unknown=True, ignored=True, clean=True)
2865 unknown=True, ignored=True, clean=True)
2859 else:
2866 else:
2860 changes = repo.status(node1=node, match=m)
2867 changes = repo.status(node1=node, match=m)
2861 for kind in changes:
2868 for kind in changes:
2862 for abs in kind:
2869 for abs in kind:
2863 names[abs] = m.rel(abs), m.exact(abs)
2870 names[abs] = m.rel(abs), m.exact(abs)
2864
2871
2865 m = scmutil.matchfiles(repo, names)
2872 m = scmutil.matchfiles(repo, names)
2866
2873
2867 modified = set(changes.modified)
2874 modified = set(changes.modified)
2868 added = set(changes.added)
2875 added = set(changes.added)
2869 removed = set(changes.removed)
2876 removed = set(changes.removed)
2870 _deleted = set(changes.deleted)
2877 _deleted = set(changes.deleted)
2871 unknown = set(changes.unknown)
2878 unknown = set(changes.unknown)
2872 unknown.update(changes.ignored)
2879 unknown.update(changes.ignored)
2873 clean = set(changes.clean)
2880 clean = set(changes.clean)
2874 modadded = set()
2881 modadded = set()
2875
2882
2876 # split between files known in target manifest and the others
2883 # split between files known in target manifest and the others
2877 smf = set(mf)
2884 smf = set(mf)
2878
2885
2879 # determine the exact nature of the deleted changesets
2886 # determine the exact nature of the deleted changesets
2880 deladded = _deleted - smf
2887 deladded = _deleted - smf
2881 deleted = _deleted - deladded
2888 deleted = _deleted - deladded
2882
2889
2883 # We need to account for the state of the file in the dirstate,
2890 # We need to account for the state of the file in the dirstate,
2884 # even when we revert against something else than parent. This will
2891 # even when we revert against something else than parent. This will
2885 # slightly alter the behavior of revert (doing back up or not, delete
2892 # slightly alter the behavior of revert (doing back up or not, delete
2886 # or just forget etc).
2893 # or just forget etc).
2887 if parent == node:
2894 if parent == node:
2888 dsmodified = modified
2895 dsmodified = modified
2889 dsadded = added
2896 dsadded = added
2890 dsremoved = removed
2897 dsremoved = removed
2891 # store all local modifications, useful later for rename detection
2898 # store all local modifications, useful later for rename detection
2892 localchanges = dsmodified | dsadded
2899 localchanges = dsmodified | dsadded
2893 modified, added, removed = set(), set(), set()
2900 modified, added, removed = set(), set(), set()
2894 else:
2901 else:
2895 changes = repo.status(node1=parent, match=m)
2902 changes = repo.status(node1=parent, match=m)
2896 dsmodified = set(changes.modified)
2903 dsmodified = set(changes.modified)
2897 dsadded = set(changes.added)
2904 dsadded = set(changes.added)
2898 dsremoved = set(changes.removed)
2905 dsremoved = set(changes.removed)
2899 # store all local modifications, useful later for rename detection
2906 # store all local modifications, useful later for rename detection
2900 localchanges = dsmodified | dsadded
2907 localchanges = dsmodified | dsadded
2901
2908
2902 # only take into account for removes between wc and target
2909 # only take into account for removes between wc and target
2903 clean |= dsremoved - removed
2910 clean |= dsremoved - removed
2904 dsremoved &= removed
2911 dsremoved &= removed
2905 # distinct between dirstate remove and other
2912 # distinct between dirstate remove and other
2906 removed -= dsremoved
2913 removed -= dsremoved
2907
2914
2908 modadded = added & dsmodified
2915 modadded = added & dsmodified
2909 added -= modadded
2916 added -= modadded
2910
2917
2911 # tell newly modified apart.
2918 # tell newly modified apart.
2912 dsmodified &= modified
2919 dsmodified &= modified
2913 dsmodified |= modified & dsadded # dirstate added may needs backup
2920 dsmodified |= modified & dsadded # dirstate added may needs backup
2914 modified -= dsmodified
2921 modified -= dsmodified
2915
2922
2916 # We need to wait for some post-processing to update this set
2923 # We need to wait for some post-processing to update this set
2917 # before making the distinction. The dirstate will be used for
2924 # before making the distinction. The dirstate will be used for
2918 # that purpose.
2925 # that purpose.
2919 dsadded = added
2926 dsadded = added
2920
2927
2921 # in case of merge, files that are actually added can be reported as
2928 # in case of merge, files that are actually added can be reported as
2922 # modified, we need to post process the result
2929 # modified, we need to post process the result
2923 if p2 != nullid:
2930 if p2 != nullid:
2924 if pmf is None:
2931 if pmf is None:
2925 # only need parent manifest in the merge case,
2932 # only need parent manifest in the merge case,
2926 # so do not read by default
2933 # so do not read by default
2927 pmf = repo[parent].manifest()
2934 pmf = repo[parent].manifest()
2928 mergeadd = dsmodified - set(pmf)
2935 mergeadd = dsmodified - set(pmf)
2929 dsadded |= mergeadd
2936 dsadded |= mergeadd
2930 dsmodified -= mergeadd
2937 dsmodified -= mergeadd
2931
2938
2932 # if f is a rename, update `names` to also revert the source
2939 # if f is a rename, update `names` to also revert the source
2933 cwd = repo.getcwd()
2940 cwd = repo.getcwd()
2934 for f in localchanges:
2941 for f in localchanges:
2935 src = repo.dirstate.copied(f)
2942 src = repo.dirstate.copied(f)
2936 # XXX should we check for rename down to target node?
2943 # XXX should we check for rename down to target node?
2937 if src and src not in names and repo.dirstate[src] == 'r':
2944 if src and src not in names and repo.dirstate[src] == 'r':
2938 dsremoved.add(src)
2945 dsremoved.add(src)
2939 names[src] = (repo.pathto(src, cwd), True)
2946 names[src] = (repo.pathto(src, cwd), True)
2940
2947
2941 # distinguish between file to forget and the other
2948 # distinguish between file to forget and the other
2942 added = set()
2949 added = set()
2943 for abs in dsadded:
2950 for abs in dsadded:
2944 if repo.dirstate[abs] != 'a':
2951 if repo.dirstate[abs] != 'a':
2945 added.add(abs)
2952 added.add(abs)
2946 dsadded -= added
2953 dsadded -= added
2947
2954
2948 for abs in deladded:
2955 for abs in deladded:
2949 if repo.dirstate[abs] == 'a':
2956 if repo.dirstate[abs] == 'a':
2950 dsadded.add(abs)
2957 dsadded.add(abs)
2951 deladded -= dsadded
2958 deladded -= dsadded
2952
2959
2953 # For files marked as removed, we check if an unknown file is present at
2960 # For files marked as removed, we check if an unknown file is present at
2954 # the same path. If a such file exists it may need to be backed up.
2961 # the same path. If a such file exists it may need to be backed up.
2955 # Making the distinction at this stage helps have simpler backup
2962 # Making the distinction at this stage helps have simpler backup
2956 # logic.
2963 # logic.
2957 removunk = set()
2964 removunk = set()
2958 for abs in removed:
2965 for abs in removed:
2959 target = repo.wjoin(abs)
2966 target = repo.wjoin(abs)
2960 if os.path.lexists(target):
2967 if os.path.lexists(target):
2961 removunk.add(abs)
2968 removunk.add(abs)
2962 removed -= removunk
2969 removed -= removunk
2963
2970
2964 dsremovunk = set()
2971 dsremovunk = set()
2965 for abs in dsremoved:
2972 for abs in dsremoved:
2966 target = repo.wjoin(abs)
2973 target = repo.wjoin(abs)
2967 if os.path.lexists(target):
2974 if os.path.lexists(target):
2968 dsremovunk.add(abs)
2975 dsremovunk.add(abs)
2969 dsremoved -= dsremovunk
2976 dsremoved -= dsremovunk
2970
2977
2971 # action to be actually performed by revert
2978 # action to be actually performed by revert
2972 # (<list of file>, message>) tuple
2979 # (<list of file>, message>) tuple
2973 actions = {'revert': ([], _('reverting %s\n')),
2980 actions = {'revert': ([], _('reverting %s\n')),
2974 'add': ([], _('adding %s\n')),
2981 'add': ([], _('adding %s\n')),
2975 'remove': ([], _('removing %s\n')),
2982 'remove': ([], _('removing %s\n')),
2976 'drop': ([], _('removing %s\n')),
2983 'drop': ([], _('removing %s\n')),
2977 'forget': ([], _('forgetting %s\n')),
2984 'forget': ([], _('forgetting %s\n')),
2978 'undelete': ([], _('undeleting %s\n')),
2985 'undelete': ([], _('undeleting %s\n')),
2979 'noop': (None, _('no changes needed to %s\n')),
2986 'noop': (None, _('no changes needed to %s\n')),
2980 'unknown': (None, _('file not managed: %s\n')),
2987 'unknown': (None, _('file not managed: %s\n')),
2981 }
2988 }
2982
2989
2983 # "constant" that convey the backup strategy.
2990 # "constant" that convey the backup strategy.
2984 # All set to `discard` if `no-backup` is set do avoid checking
2991 # All set to `discard` if `no-backup` is set do avoid checking
2985 # no_backup lower in the code.
2992 # no_backup lower in the code.
2986 # These values are ordered for comparison purposes
2993 # These values are ordered for comparison purposes
2987 backup = 2 # unconditionally do backup
2994 backup = 2 # unconditionally do backup
2988 check = 1 # check if the existing file differs from target
2995 check = 1 # check if the existing file differs from target
2989 discard = 0 # never do backup
2996 discard = 0 # never do backup
2990 if opts.get('no_backup'):
2997 if opts.get('no_backup'):
2991 backup = check = discard
2998 backup = check = discard
2992
2999
2993 backupanddel = actions['remove']
3000 backupanddel = actions['remove']
2994 if not opts.get('no_backup'):
3001 if not opts.get('no_backup'):
2995 backupanddel = actions['drop']
3002 backupanddel = actions['drop']
2996
3003
2997 disptable = (
3004 disptable = (
2998 # dispatch table:
3005 # dispatch table:
2999 # file state
3006 # file state
3000 # action
3007 # action
3001 # make backup
3008 # make backup
3002
3009
3003 ## Sets that results that will change file on disk
3010 ## Sets that results that will change file on disk
3004 # Modified compared to target, no local change
3011 # Modified compared to target, no local change
3005 (modified, actions['revert'], discard),
3012 (modified, actions['revert'], discard),
3006 # Modified compared to target, but local file is deleted
3013 # Modified compared to target, but local file is deleted
3007 (deleted, actions['revert'], discard),
3014 (deleted, actions['revert'], discard),
3008 # Modified compared to target, local change
3015 # Modified compared to target, local change
3009 (dsmodified, actions['revert'], backup),
3016 (dsmodified, actions['revert'], backup),
3010 # Added since target
3017 # Added since target
3011 (added, actions['remove'], discard),
3018 (added, actions['remove'], discard),
3012 # Added in working directory
3019 # Added in working directory
3013 (dsadded, actions['forget'], discard),
3020 (dsadded, actions['forget'], discard),
3014 # Added since target, have local modification
3021 # Added since target, have local modification
3015 (modadded, backupanddel, backup),
3022 (modadded, backupanddel, backup),
3016 # Added since target but file is missing in working directory
3023 # Added since target but file is missing in working directory
3017 (deladded, actions['drop'], discard),
3024 (deladded, actions['drop'], discard),
3018 # Removed since target, before working copy parent
3025 # Removed since target, before working copy parent
3019 (removed, actions['add'], discard),
3026 (removed, actions['add'], discard),
3020 # Same as `removed` but an unknown file exists at the same path
3027 # Same as `removed` but an unknown file exists at the same path
3021 (removunk, actions['add'], check),
3028 (removunk, actions['add'], check),
3022 # Removed since targe, marked as such in working copy parent
3029 # Removed since targe, marked as such in working copy parent
3023 (dsremoved, actions['undelete'], discard),
3030 (dsremoved, actions['undelete'], discard),
3024 # Same as `dsremoved` but an unknown file exists at the same path
3031 # Same as `dsremoved` but an unknown file exists at the same path
3025 (dsremovunk, actions['undelete'], check),
3032 (dsremovunk, actions['undelete'], check),
3026 ## the following sets does not result in any file changes
3033 ## the following sets does not result in any file changes
3027 # File with no modification
3034 # File with no modification
3028 (clean, actions['noop'], discard),
3035 (clean, actions['noop'], discard),
3029 # Existing file, not tracked anywhere
3036 # Existing file, not tracked anywhere
3030 (unknown, actions['unknown'], discard),
3037 (unknown, actions['unknown'], discard),
3031 )
3038 )
3032
3039
3033 for abs, (rel, exact) in sorted(names.items()):
3040 for abs, (rel, exact) in sorted(names.items()):
3034 # target file to be touch on disk (relative to cwd)
3041 # target file to be touch on disk (relative to cwd)
3035 target = repo.wjoin(abs)
3042 target = repo.wjoin(abs)
3036 # search the entry in the dispatch table.
3043 # search the entry in the dispatch table.
3037 # if the file is in any of these sets, it was touched in the working
3044 # if the file is in any of these sets, it was touched in the working
3038 # directory parent and we are sure it needs to be reverted.
3045 # directory parent and we are sure it needs to be reverted.
3039 for table, (xlist, msg), dobackup in disptable:
3046 for table, (xlist, msg), dobackup in disptable:
3040 if abs not in table:
3047 if abs not in table:
3041 continue
3048 continue
3042 if xlist is not None:
3049 if xlist is not None:
3043 xlist.append(abs)
3050 xlist.append(abs)
3044 if dobackup and (backup <= dobackup
3051 if dobackup and (backup <= dobackup
3045 or wctx[abs].cmp(ctx[abs])):
3052 or wctx[abs].cmp(ctx[abs])):
3046 bakname = "%s.orig" % rel
3053 bakname = "%s.orig" % rel
3047 ui.note(_('saving current version of %s as %s\n') %
3054 ui.note(_('saving current version of %s as %s\n') %
3048 (rel, bakname))
3055 (rel, bakname))
3049 if not opts.get('dry_run'):
3056 if not opts.get('dry_run'):
3050 if interactive:
3057 if interactive:
3051 util.copyfile(target, bakname)
3058 util.copyfile(target, bakname)
3052 else:
3059 else:
3053 util.rename(target, bakname)
3060 util.rename(target, bakname)
3054 if ui.verbose or not exact:
3061 if ui.verbose or not exact:
3055 if not isinstance(msg, basestring):
3062 if not isinstance(msg, basestring):
3056 msg = msg(abs)
3063 msg = msg(abs)
3057 ui.status(msg % rel)
3064 ui.status(msg % rel)
3058 elif exact:
3065 elif exact:
3059 ui.warn(msg % rel)
3066 ui.warn(msg % rel)
3060 break
3067 break
3061
3068
3062 if not opts.get('dry_run'):
3069 if not opts.get('dry_run'):
3063 needdata = ('revert', 'add', 'undelete')
3070 needdata = ('revert', 'add', 'undelete')
3064 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3071 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3065 _performrevert(repo, parents, ctx, actions, interactive)
3072 _performrevert(repo, parents, ctx, actions, interactive)
3066
3073
3067 if targetsubs:
3074 if targetsubs:
3068 # Revert the subrepos on the revert list
3075 # Revert the subrepos on the revert list
3069 for sub in targetsubs:
3076 for sub in targetsubs:
3070 try:
3077 try:
3071 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3078 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3072 except KeyError:
3079 except KeyError:
3073 raise util.Abort("subrepository '%s' does not exist in %s!"
3080 raise util.Abort("subrepository '%s' does not exist in %s!"
3074 % (sub, short(ctx.node())))
3081 % (sub, short(ctx.node())))
3075 finally:
3082 finally:
3076 wlock.release()
3083 wlock.release()
3077
3084
3078 def _revertprefetch(repo, ctx, *files):
3085 def _revertprefetch(repo, ctx, *files):
3079 """Let extension changing the storage layer prefetch content"""
3086 """Let extension changing the storage layer prefetch content"""
3080 pass
3087 pass
3081
3088
3082 def _performrevert(repo, parents, ctx, actions, interactive=False):
3089 def _performrevert(repo, parents, ctx, actions, interactive=False):
3083 """function that actually perform all the actions computed for revert
3090 """function that actually perform all the actions computed for revert
3084
3091
3085 This is an independent function to let extension to plug in and react to
3092 This is an independent function to let extension to plug in and react to
3086 the imminent revert.
3093 the imminent revert.
3087
3094
3088 Make sure you have the working directory locked when calling this function.
3095 Make sure you have the working directory locked when calling this function.
3089 """
3096 """
3090 parent, p2 = parents
3097 parent, p2 = parents
3091 node = ctx.node()
3098 node = ctx.node()
3092 def checkout(f):
3099 def checkout(f):
3093 fc = ctx[f]
3100 fc = ctx[f]
3094 repo.wwrite(f, fc.data(), fc.flags())
3101 repo.wwrite(f, fc.data(), fc.flags())
3095
3102
3096 audit_path = pathutil.pathauditor(repo.root)
3103 audit_path = pathutil.pathauditor(repo.root)
3097 for f in actions['forget'][0]:
3104 for f in actions['forget'][0]:
3098 repo.dirstate.drop(f)
3105 repo.dirstate.drop(f)
3099 for f in actions['remove'][0]:
3106 for f in actions['remove'][0]:
3100 audit_path(f)
3107 audit_path(f)
3101 try:
3108 try:
3102 util.unlinkpath(repo.wjoin(f))
3109 util.unlinkpath(repo.wjoin(f))
3103 except OSError:
3110 except OSError:
3104 pass
3111 pass
3105 repo.dirstate.remove(f)
3112 repo.dirstate.remove(f)
3106 for f in actions['drop'][0]:
3113 for f in actions['drop'][0]:
3107 audit_path(f)
3114 audit_path(f)
3108 repo.dirstate.remove(f)
3115 repo.dirstate.remove(f)
3109
3116
3110 normal = None
3117 normal = None
3111 if node == parent:
3118 if node == parent:
3112 # We're reverting to our parent. If possible, we'd like status
3119 # We're reverting to our parent. If possible, we'd like status
3113 # to report the file as clean. We have to use normallookup for
3120 # to report the file as clean. We have to use normallookup for
3114 # merges to avoid losing information about merged/dirty files.
3121 # merges to avoid losing information about merged/dirty files.
3115 if p2 != nullid:
3122 if p2 != nullid:
3116 normal = repo.dirstate.normallookup
3123 normal = repo.dirstate.normallookup
3117 else:
3124 else:
3118 normal = repo.dirstate.normal
3125 normal = repo.dirstate.normal
3119
3126
3120 newlyaddedandmodifiedfiles = set()
3127 newlyaddedandmodifiedfiles = set()
3121 if interactive:
3128 if interactive:
3122 # Prompt the user for changes to revert
3129 # Prompt the user for changes to revert
3123 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3130 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3124 m = scmutil.match(ctx, torevert, {})
3131 m = scmutil.match(ctx, torevert, {})
3125 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3132 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3126 diffopts.nodates = True
3133 diffopts.nodates = True
3127 diffopts.git = True
3134 diffopts.git = True
3128 reversehunks = repo.ui.configbool('experimental',
3135 reversehunks = repo.ui.configbool('experimental',
3129 'revertalternateinteractivemode',
3136 'revertalternateinteractivemode',
3130 True)
3137 True)
3131 if reversehunks:
3138 if reversehunks:
3132 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3139 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3133 else:
3140 else:
3134 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3141 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3135 originalchunks = patch.parsepatch(diff)
3142 originalchunks = patch.parsepatch(diff)
3136
3143
3137 try:
3144 try:
3138
3145
3139 chunks = recordfilter(repo.ui, originalchunks)
3146 chunks = recordfilter(repo.ui, originalchunks)
3140 if reversehunks:
3147 if reversehunks:
3141 chunks = patch.reversehunks(chunks)
3148 chunks = patch.reversehunks(chunks)
3142
3149
3143 except patch.PatchError as err:
3150 except patch.PatchError as err:
3144 raise util.Abort(_('error parsing patch: %s') % err)
3151 raise util.Abort(_('error parsing patch: %s') % err)
3145
3152
3146 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3153 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3147 # Apply changes
3154 # Apply changes
3148 fp = cStringIO.StringIO()
3155 fp = cStringIO.StringIO()
3149 for c in chunks:
3156 for c in chunks:
3150 c.write(fp)
3157 c.write(fp)
3151 dopatch = fp.tell()
3158 dopatch = fp.tell()
3152 fp.seek(0)
3159 fp.seek(0)
3153 if dopatch:
3160 if dopatch:
3154 try:
3161 try:
3155 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3162 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3156 except patch.PatchError as err:
3163 except patch.PatchError as err:
3157 raise util.Abort(str(err))
3164 raise util.Abort(str(err))
3158 del fp
3165 del fp
3159 else:
3166 else:
3160 for f in actions['revert'][0]:
3167 for f in actions['revert'][0]:
3161 checkout(f)
3168 checkout(f)
3162 if normal:
3169 if normal:
3163 normal(f)
3170 normal(f)
3164
3171
3165 for f in actions['add'][0]:
3172 for f in actions['add'][0]:
3166 # Don't checkout modified files, they are already created by the diff
3173 # Don't checkout modified files, they are already created by the diff
3167 if f not in newlyaddedandmodifiedfiles:
3174 if f not in newlyaddedandmodifiedfiles:
3168 checkout(f)
3175 checkout(f)
3169 repo.dirstate.add(f)
3176 repo.dirstate.add(f)
3170
3177
3171 normal = repo.dirstate.normallookup
3178 normal = repo.dirstate.normallookup
3172 if node == parent and p2 == nullid:
3179 if node == parent and p2 == nullid:
3173 normal = repo.dirstate.normal
3180 normal = repo.dirstate.normal
3174 for f in actions['undelete'][0]:
3181 for f in actions['undelete'][0]:
3175 checkout(f)
3182 checkout(f)
3176 normal(f)
3183 normal(f)
3177
3184
3178 copied = copies.pathcopies(repo[parent], ctx)
3185 copied = copies.pathcopies(repo[parent], ctx)
3179
3186
3180 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3187 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3181 if f in copied:
3188 if f in copied:
3182 repo.dirstate.copy(copied[f], f)
3189 repo.dirstate.copy(copied[f], f)
3183
3190
3184 def command(table):
3191 def command(table):
3185 """Returns a function object to be used as a decorator for making commands.
3192 """Returns a function object to be used as a decorator for making commands.
3186
3193
3187 This function receives a command table as its argument. The table should
3194 This function receives a command table as its argument. The table should
3188 be a dict.
3195 be a dict.
3189
3196
3190 The returned function can be used as a decorator for adding commands
3197 The returned function can be used as a decorator for adding commands
3191 to that command table. This function accepts multiple arguments to define
3198 to that command table. This function accepts multiple arguments to define
3192 a command.
3199 a command.
3193
3200
3194 The first argument is the command name.
3201 The first argument is the command name.
3195
3202
3196 The options argument is an iterable of tuples defining command arguments.
3203 The options argument is an iterable of tuples defining command arguments.
3197 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3204 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3198
3205
3199 The synopsis argument defines a short, one line summary of how to use the
3206 The synopsis argument defines a short, one line summary of how to use the
3200 command. This shows up in the help output.
3207 command. This shows up in the help output.
3201
3208
3202 The norepo argument defines whether the command does not require a
3209 The norepo argument defines whether the command does not require a
3203 local repository. Most commands operate against a repository, thus the
3210 local repository. Most commands operate against a repository, thus the
3204 default is False.
3211 default is False.
3205
3212
3206 The optionalrepo argument defines whether the command optionally requires
3213 The optionalrepo argument defines whether the command optionally requires
3207 a local repository.
3214 a local repository.
3208
3215
3209 The inferrepo argument defines whether to try to find a repository from the
3216 The inferrepo argument defines whether to try to find a repository from the
3210 command line arguments. If True, arguments will be examined for potential
3217 command line arguments. If True, arguments will be examined for potential
3211 repository locations. See ``findrepo()``. If a repository is found, it
3218 repository locations. See ``findrepo()``. If a repository is found, it
3212 will be used.
3219 will be used.
3213 """
3220 """
3214 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3221 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3215 inferrepo=False):
3222 inferrepo=False):
3216 def decorator(func):
3223 def decorator(func):
3217 if synopsis:
3224 if synopsis:
3218 table[name] = func, list(options), synopsis
3225 table[name] = func, list(options), synopsis
3219 else:
3226 else:
3220 table[name] = func, list(options)
3227 table[name] = func, list(options)
3221
3228
3222 if norepo:
3229 if norepo:
3223 # Avoid import cycle.
3230 # Avoid import cycle.
3224 import commands
3231 import commands
3225 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3232 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3226
3233
3227 if optionalrepo:
3234 if optionalrepo:
3228 import commands
3235 import commands
3229 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3236 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3230
3237
3231 if inferrepo:
3238 if inferrepo:
3232 import commands
3239 import commands
3233 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3240 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3234
3241
3235 return func
3242 return func
3236 return decorator
3243 return decorator
3237
3244
3238 return cmd
3245 return cmd
3239
3246
3240 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3247 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3241 # commands.outgoing. "missing" is "missing" of the result of
3248 # commands.outgoing. "missing" is "missing" of the result of
3242 # "findcommonoutgoing()"
3249 # "findcommonoutgoing()"
3243 outgoinghooks = util.hooks()
3250 outgoinghooks = util.hooks()
3244
3251
3245 # a list of (ui, repo) functions called by commands.summary
3252 # a list of (ui, repo) functions called by commands.summary
3246 summaryhooks = util.hooks()
3253 summaryhooks = util.hooks()
3247
3254
3248 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3255 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3249 #
3256 #
3250 # functions should return tuple of booleans below, if 'changes' is None:
3257 # functions should return tuple of booleans below, if 'changes' is None:
3251 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3258 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3252 #
3259 #
3253 # otherwise, 'changes' is a tuple of tuples below:
3260 # otherwise, 'changes' is a tuple of tuples below:
3254 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3261 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3255 # - (desturl, destbranch, destpeer, outgoing)
3262 # - (desturl, destbranch, destpeer, outgoing)
3256 summaryremotehooks = util.hooks()
3263 summaryremotehooks = util.hooks()
3257
3264
3258 # A list of state files kept by multistep operations like graft.
3265 # A list of state files kept by multistep operations like graft.
3259 # Since graft cannot be aborted, it is considered 'clearable' by update.
3266 # Since graft cannot be aborted, it is considered 'clearable' by update.
3260 # note: bisect is intentionally excluded
3267 # note: bisect is intentionally excluded
3261 # (state file, clearable, allowcommit, error, hint)
3268 # (state file, clearable, allowcommit, error, hint)
3262 unfinishedstates = [
3269 unfinishedstates = [
3263 ('graftstate', True, False, _('graft in progress'),
3270 ('graftstate', True, False, _('graft in progress'),
3264 _("use 'hg graft --continue' or 'hg update' to abort")),
3271 _("use 'hg graft --continue' or 'hg update' to abort")),
3265 ('updatestate', True, False, _('last update was interrupted'),
3272 ('updatestate', True, False, _('last update was interrupted'),
3266 _("use 'hg update' to get a consistent checkout"))
3273 _("use 'hg update' to get a consistent checkout"))
3267 ]
3274 ]
3268
3275
3269 def checkunfinished(repo, commit=False):
3276 def checkunfinished(repo, commit=False):
3270 '''Look for an unfinished multistep operation, like graft, and abort
3277 '''Look for an unfinished multistep operation, like graft, and abort
3271 if found. It's probably good to check this right before
3278 if found. It's probably good to check this right before
3272 bailifchanged().
3279 bailifchanged().
3273 '''
3280 '''
3274 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3281 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3275 if commit and allowcommit:
3282 if commit and allowcommit:
3276 continue
3283 continue
3277 if repo.vfs.exists(f):
3284 if repo.vfs.exists(f):
3278 raise util.Abort(msg, hint=hint)
3285 raise util.Abort(msg, hint=hint)
3279
3286
3280 def clearunfinished(repo):
3287 def clearunfinished(repo):
3281 '''Check for unfinished operations (as above), and clear the ones
3288 '''Check for unfinished operations (as above), and clear the ones
3282 that are clearable.
3289 that are clearable.
3283 '''
3290 '''
3284 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3291 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3285 if not clearable and repo.vfs.exists(f):
3292 if not clearable and repo.vfs.exists(f):
3286 raise util.Abort(msg, hint=hint)
3293 raise util.Abort(msg, hint=hint)
3287 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3294 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3288 if clearable and repo.vfs.exists(f):
3295 if clearable and repo.vfs.exists(f):
3289 util.unlink(repo.join(f))
3296 util.unlink(repo.join(f))
3290
3297
3291 class dirstateguard(object):
3298 class dirstateguard(object):
3292 '''Restore dirstate at unexpected failure.
3299 '''Restore dirstate at unexpected failure.
3293
3300
3294 At the construction, this class does:
3301 At the construction, this class does:
3295
3302
3296 - write current ``repo.dirstate`` out, and
3303 - write current ``repo.dirstate`` out, and
3297 - save ``.hg/dirstate`` into the backup file
3304 - save ``.hg/dirstate`` into the backup file
3298
3305
3299 This restores ``.hg/dirstate`` from backup file, if ``release()``
3306 This restores ``.hg/dirstate`` from backup file, if ``release()``
3300 is invoked before ``close()``.
3307 is invoked before ``close()``.
3301
3308
3302 This just removes the backup file at ``close()`` before ``release()``.
3309 This just removes the backup file at ``close()`` before ``release()``.
3303 '''
3310 '''
3304
3311
3305 def __init__(self, repo, name):
3312 def __init__(self, repo, name):
3306 repo.dirstate.write()
3313 repo.dirstate.write()
3307 self._repo = repo
3314 self._repo = repo
3308 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3315 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3309 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3316 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3310 self._active = True
3317 self._active = True
3311 self._closed = False
3318 self._closed = False
3312
3319
3313 def __del__(self):
3320 def __del__(self):
3314 if self._active: # still active
3321 if self._active: # still active
3315 # this may occur, even if this class is used correctly:
3322 # this may occur, even if this class is used correctly:
3316 # for example, releasing other resources like transaction
3323 # for example, releasing other resources like transaction
3317 # may raise exception before ``dirstateguard.release`` in
3324 # may raise exception before ``dirstateguard.release`` in
3318 # ``release(tr, ....)``.
3325 # ``release(tr, ....)``.
3319 self._abort()
3326 self._abort()
3320
3327
3321 def close(self):
3328 def close(self):
3322 if not self._active: # already inactivated
3329 if not self._active: # already inactivated
3323 msg = (_("can't close already inactivated backup: %s")
3330 msg = (_("can't close already inactivated backup: %s")
3324 % self._filename)
3331 % self._filename)
3325 raise util.Abort(msg)
3332 raise util.Abort(msg)
3326
3333
3327 self._repo.vfs.unlink(self._filename)
3334 self._repo.vfs.unlink(self._filename)
3328 self._active = False
3335 self._active = False
3329 self._closed = True
3336 self._closed = True
3330
3337
3331 def _abort(self):
3338 def _abort(self):
3332 # this "invalidate()" prevents "wlock.release()" from writing
3339 # this "invalidate()" prevents "wlock.release()" from writing
3333 # changes of dirstate out after restoring to original status
3340 # changes of dirstate out after restoring to original status
3334 self._repo.dirstate.invalidate()
3341 self._repo.dirstate.invalidate()
3335
3342
3336 self._repo.vfs.rename(self._filename, 'dirstate')
3343 self._repo.vfs.rename(self._filename, 'dirstate')
3337 self._active = False
3344 self._active = False
3338
3345
3339 def release(self):
3346 def release(self):
3340 if not self._closed:
3347 if not self._closed:
3341 if not self._active: # already inactivated
3348 if not self._active: # already inactivated
3342 msg = (_("can't release already inactivated backup: %s")
3349 msg = (_("can't release already inactivated backup: %s")
3343 % self._filename)
3350 % self._filename)
3344 raise util.Abort(msg)
3351 raise util.Abort(msg)
3345 self._abort()
3352 self._abort()
3346
3353
3347 _bundlecompspecs = {'none': None,
3354 _bundlecompspecs = {'none': None,
3348 'bzip2': 'BZ',
3355 'bzip2': 'BZ',
3349 'gzip': 'GZ',
3356 'gzip': 'GZ',
3350 }
3357 }
3351
3358
3352 _bundleversionspecs = {'v1': '01',
3359 _bundleversionspecs = {'v1': '01',
3353 'v2': '02',
3360 'v2': '02',
3354 'bundle2': '02', #legacy
3361 'bundle2': '02', #legacy
3355 }
3362 }
3356
3363
3357 def parsebundletype(repo, spec):
3364 def parsebundletype(repo, spec):
3358 """return the internal bundle type to use from a user input
3365 """return the internal bundle type to use from a user input
3359
3366
3360 This is parsing user specified bundle type as accepted in:
3367 This is parsing user specified bundle type as accepted in:
3361
3368
3362 'hg bundle --type TYPE'.
3369 'hg bundle --type TYPE'.
3363
3370
3364 It accept format in the form [compression][-version]|[version]
3371 It accept format in the form [compression][-version]|[version]
3365
3372
3366 Consensus about extensions of the format for various bundle2 feature
3373 Consensus about extensions of the format for various bundle2 feature
3367 is to prefix any feature with "+". eg "+treemanifest" or "gzip+phases"
3374 is to prefix any feature with "+". eg "+treemanifest" or "gzip+phases"
3368 """
3375 """
3369 comp, version = None, None
3376 comp, version = None, None
3370
3377
3371 if '-' in spec:
3378 if '-' in spec:
3372 comp, version = spec.split('-', 1)
3379 comp, version = spec.split('-', 1)
3373 elif spec in _bundlecompspecs:
3380 elif spec in _bundlecompspecs:
3374 comp = spec
3381 comp = spec
3375 elif spec in _bundleversionspecs:
3382 elif spec in _bundleversionspecs:
3376 version = spec
3383 version = spec
3377 else:
3384 else:
3378 raise util.Abort(_('unknown bundle type specified with --type'))
3385 raise util.Abort(_('unknown bundle type specified with --type'))
3379
3386
3380 if comp is None:
3387 if comp is None:
3381 comp = 'BZ'
3388 comp = 'BZ'
3382 else:
3389 else:
3383 try:
3390 try:
3384 comp = _bundlecompspecs[comp]
3391 comp = _bundlecompspecs[comp]
3385 except KeyError:
3392 except KeyError:
3386 raise util.Abort(_('unknown bundle type specified with --type'))
3393 raise util.Abort(_('unknown bundle type specified with --type'))
3387
3394
3388 if version is None:
3395 if version is None:
3389 version = '01'
3396 version = '01'
3390 if 'generaldelta' in repo.requirements:
3397 if 'generaldelta' in repo.requirements:
3391 version = '02'
3398 version = '02'
3392 else:
3399 else:
3393 try:
3400 try:
3394 version = _bundleversionspecs[version]
3401 version = _bundleversionspecs[version]
3395 except KeyError:
3402 except KeyError:
3396 raise util.Abort(_('unknown bundle type specified with --type'))
3403 raise util.Abort(_('unknown bundle type specified with --type'))
3397
3404
3398 return version, comp
3405 return version, comp
@@ -1,2549 +1,2570 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 def extract(ui, fileobj):
154 def extract(ui, fileobj):
155 '''extract patch from data read from fileobj.
155 '''extract patch from data read from fileobj.
156
156
157 patch can be a normal patch or contained in an email message.
157 patch can be a normal patch or contained in an email message.
158
158
159 return tuple (filename, message, user, date, branch, node, p1, p2).
159 return a dictionary. Standard keys are:
160 Any item in the returned tuple can be None. If filename is None,
160 - filename,
161 - message,
162 - user,
163 - date,
164 - branch,
165 - node,
166 - p1,
167 - p2.
168 Any item can be missing from the dictionary. If filename is missing,
161 fileobj did not contain a patch. Caller must unlink filename when done.'''
169 fileobj did not contain a patch. Caller must unlink filename when done.'''
162
170
163 # attempt to detect the start of a patch
171 # attempt to detect the start of a patch
164 # (this heuristic is borrowed from quilt)
172 # (this heuristic is borrowed from quilt)
165 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
173 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
166 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
174 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
167 r'---[ \t].*?^\+\+\+[ \t]|'
175 r'---[ \t].*?^\+\+\+[ \t]|'
168 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
176 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
169
177
178 data = {}
170 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
179 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
171 tmpfp = os.fdopen(fd, 'w')
180 tmpfp = os.fdopen(fd, 'w')
172 try:
181 try:
173 msg = email.Parser.Parser().parse(fileobj)
182 msg = email.Parser.Parser().parse(fileobj)
174
183
175 subject = msg['Subject']
184 subject = msg['Subject']
176 user = msg['From']
185 user = msg['From']
177 if not subject and not user:
186 if not subject and not user:
178 # Not an email, restore parsed headers if any
187 # Not an email, restore parsed headers if any
179 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
188 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
180
189
181 # should try to parse msg['Date']
190 # should try to parse msg['Date']
182 date = None
191 date = None
183 nodeid = None
192 nodeid = None
184 branch = None
193 branch = None
185 parents = []
194 parents = []
186
195
187 if subject:
196 if subject:
188 if subject.startswith('[PATCH'):
197 if subject.startswith('[PATCH'):
189 pend = subject.find(']')
198 pend = subject.find(']')
190 if pend >= 0:
199 if pend >= 0:
191 subject = subject[pend + 1:].lstrip()
200 subject = subject[pend + 1:].lstrip()
192 subject = re.sub(r'\n[ \t]+', ' ', subject)
201 subject = re.sub(r'\n[ \t]+', ' ', subject)
193 ui.debug('Subject: %s\n' % subject)
202 ui.debug('Subject: %s\n' % subject)
194 if user:
203 if user:
195 ui.debug('From: %s\n' % user)
204 ui.debug('From: %s\n' % user)
196 diffs_seen = 0
205 diffs_seen = 0
197 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
206 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
198 message = ''
207 message = ''
199 for part in msg.walk():
208 for part in msg.walk():
200 content_type = part.get_content_type()
209 content_type = part.get_content_type()
201 ui.debug('Content-Type: %s\n' % content_type)
210 ui.debug('Content-Type: %s\n' % content_type)
202 if content_type not in ok_types:
211 if content_type not in ok_types:
203 continue
212 continue
204 payload = part.get_payload(decode=True)
213 payload = part.get_payload(decode=True)
205 m = diffre.search(payload)
214 m = diffre.search(payload)
206 if m:
215 if m:
207 hgpatch = False
216 hgpatch = False
208 hgpatchheader = False
217 hgpatchheader = False
209 ignoretext = False
218 ignoretext = False
210
219
211 ui.debug('found patch at byte %d\n' % m.start(0))
220 ui.debug('found patch at byte %d\n' % m.start(0))
212 diffs_seen += 1
221 diffs_seen += 1
213 cfp = cStringIO.StringIO()
222 cfp = cStringIO.StringIO()
214 for line in payload[:m.start(0)].splitlines():
223 for line in payload[:m.start(0)].splitlines():
215 if line.startswith('# HG changeset patch') and not hgpatch:
224 if line.startswith('# HG changeset patch') and not hgpatch:
216 ui.debug('patch generated by hg export\n')
225 ui.debug('patch generated by hg export\n')
217 hgpatch = True
226 hgpatch = True
218 hgpatchheader = True
227 hgpatchheader = True
219 # drop earlier commit message content
228 # drop earlier commit message content
220 cfp.seek(0)
229 cfp.seek(0)
221 cfp.truncate()
230 cfp.truncate()
222 subject = None
231 subject = None
223 elif hgpatchheader:
232 elif hgpatchheader:
224 if line.startswith('# User '):
233 if line.startswith('# User '):
225 user = line[7:]
234 user = line[7:]
226 ui.debug('From: %s\n' % user)
235 ui.debug('From: %s\n' % user)
227 elif line.startswith("# Date "):
236 elif line.startswith("# Date "):
228 date = line[7:]
237 date = line[7:]
229 elif line.startswith("# Branch "):
238 elif line.startswith("# Branch "):
230 branch = line[9:]
239 branch = line[9:]
231 elif line.startswith("# Node ID "):
240 elif line.startswith("# Node ID "):
232 nodeid = line[10:]
241 nodeid = line[10:]
233 elif line.startswith("# Parent "):
242 elif line.startswith("# Parent "):
234 parents.append(line[9:].lstrip())
243 parents.append(line[9:].lstrip())
235 elif not line.startswith("# "):
244 elif not line.startswith("# "):
236 hgpatchheader = False
245 hgpatchheader = False
237 elif line == '---':
246 elif line == '---':
238 ignoretext = True
247 ignoretext = True
239 if not hgpatchheader and not ignoretext:
248 if not hgpatchheader and not ignoretext:
240 cfp.write(line)
249 cfp.write(line)
241 cfp.write('\n')
250 cfp.write('\n')
242 message = cfp.getvalue()
251 message = cfp.getvalue()
243 if tmpfp:
252 if tmpfp:
244 tmpfp.write(payload)
253 tmpfp.write(payload)
245 if not payload.endswith('\n'):
254 if not payload.endswith('\n'):
246 tmpfp.write('\n')
255 tmpfp.write('\n')
247 elif not diffs_seen and message and content_type == 'text/plain':
256 elif not diffs_seen and message and content_type == 'text/plain':
248 message += '\n' + payload
257 message += '\n' + payload
249 except: # re-raises
258 except: # re-raises
250 tmpfp.close()
259 tmpfp.close()
251 os.unlink(tmpname)
260 os.unlink(tmpname)
252 raise
261 raise
253
262
254 if subject and not message.startswith(subject):
263 if subject and not message.startswith(subject):
255 message = '%s\n%s' % (subject, message)
264 message = '%s\n%s' % (subject, message)
256 tmpfp.close()
265 tmpfp.close()
257 if not diffs_seen:
266 if not diffs_seen:
258 os.unlink(tmpname)
267 os.unlink(tmpname)
259 return None, message, user, date, branch, None, None, None
268 data['message'] = message
269 data['user'] = user
270 data['date'] = date
271 data['branch'] = branch
272 return data
260
273
261 if parents:
274 if parents:
262 p1 = parents.pop(0)
275 p1 = parents.pop(0)
263 else:
276 else:
264 p1 = None
277 p1 = None
265
278
266 if parents:
279 if parents:
267 p2 = parents.pop(0)
280 p2 = parents.pop(0)
268 else:
281 else:
269 p2 = None
282 p2 = None
270
283
271 return tmpname, message, user, date, branch, nodeid, p1, p2
284 data['filename'] = tmpname
285 data['message'] = message
286 data['user'] = user
287 data['date'] = date
288 data['branch'] = branch
289 data['nodeid'] = nodeid
290 data['p1'] = p1
291 data['p2'] = p2
292 return data
272
293
273 class patchmeta(object):
294 class patchmeta(object):
274 """Patched file metadata
295 """Patched file metadata
275
296
276 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
297 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
277 or COPY. 'path' is patched file path. 'oldpath' is set to the
298 or COPY. 'path' is patched file path. 'oldpath' is set to the
278 origin file when 'op' is either COPY or RENAME, None otherwise. If
299 origin file when 'op' is either COPY or RENAME, None otherwise. If
279 file mode is changed, 'mode' is a tuple (islink, isexec) where
300 file mode is changed, 'mode' is a tuple (islink, isexec) where
280 'islink' is True if the file is a symlink and 'isexec' is True if
301 'islink' is True if the file is a symlink and 'isexec' is True if
281 the file is executable. Otherwise, 'mode' is None.
302 the file is executable. Otherwise, 'mode' is None.
282 """
303 """
283 def __init__(self, path):
304 def __init__(self, path):
284 self.path = path
305 self.path = path
285 self.oldpath = None
306 self.oldpath = None
286 self.mode = None
307 self.mode = None
287 self.op = 'MODIFY'
308 self.op = 'MODIFY'
288 self.binary = False
309 self.binary = False
289
310
290 def setmode(self, mode):
311 def setmode(self, mode):
291 islink = mode & 0o20000
312 islink = mode & 0o20000
292 isexec = mode & 0o100
313 isexec = mode & 0o100
293 self.mode = (islink, isexec)
314 self.mode = (islink, isexec)
294
315
295 def copy(self):
316 def copy(self):
296 other = patchmeta(self.path)
317 other = patchmeta(self.path)
297 other.oldpath = self.oldpath
318 other.oldpath = self.oldpath
298 other.mode = self.mode
319 other.mode = self.mode
299 other.op = self.op
320 other.op = self.op
300 other.binary = self.binary
321 other.binary = self.binary
301 return other
322 return other
302
323
303 def _ispatchinga(self, afile):
324 def _ispatchinga(self, afile):
304 if afile == '/dev/null':
325 if afile == '/dev/null':
305 return self.op == 'ADD'
326 return self.op == 'ADD'
306 return afile == 'a/' + (self.oldpath or self.path)
327 return afile == 'a/' + (self.oldpath or self.path)
307
328
308 def _ispatchingb(self, bfile):
329 def _ispatchingb(self, bfile):
309 if bfile == '/dev/null':
330 if bfile == '/dev/null':
310 return self.op == 'DELETE'
331 return self.op == 'DELETE'
311 return bfile == 'b/' + self.path
332 return bfile == 'b/' + self.path
312
333
313 def ispatching(self, afile, bfile):
334 def ispatching(self, afile, bfile):
314 return self._ispatchinga(afile) and self._ispatchingb(bfile)
335 return self._ispatchinga(afile) and self._ispatchingb(bfile)
315
336
316 def __repr__(self):
337 def __repr__(self):
317 return "<patchmeta %s %r>" % (self.op, self.path)
338 return "<patchmeta %s %r>" % (self.op, self.path)
318
339
319 def readgitpatch(lr):
340 def readgitpatch(lr):
320 """extract git-style metadata about patches from <patchname>"""
341 """extract git-style metadata about patches from <patchname>"""
321
342
322 # Filter patch for git information
343 # Filter patch for git information
323 gp = None
344 gp = None
324 gitpatches = []
345 gitpatches = []
325 for line in lr:
346 for line in lr:
326 line = line.rstrip(' \r\n')
347 line = line.rstrip(' \r\n')
327 if line.startswith('diff --git a/'):
348 if line.startswith('diff --git a/'):
328 m = gitre.match(line)
349 m = gitre.match(line)
329 if m:
350 if m:
330 if gp:
351 if gp:
331 gitpatches.append(gp)
352 gitpatches.append(gp)
332 dst = m.group(2)
353 dst = m.group(2)
333 gp = patchmeta(dst)
354 gp = patchmeta(dst)
334 elif gp:
355 elif gp:
335 if line.startswith('--- '):
356 if line.startswith('--- '):
336 gitpatches.append(gp)
357 gitpatches.append(gp)
337 gp = None
358 gp = None
338 continue
359 continue
339 if line.startswith('rename from '):
360 if line.startswith('rename from '):
340 gp.op = 'RENAME'
361 gp.op = 'RENAME'
341 gp.oldpath = line[12:]
362 gp.oldpath = line[12:]
342 elif line.startswith('rename to '):
363 elif line.startswith('rename to '):
343 gp.path = line[10:]
364 gp.path = line[10:]
344 elif line.startswith('copy from '):
365 elif line.startswith('copy from '):
345 gp.op = 'COPY'
366 gp.op = 'COPY'
346 gp.oldpath = line[10:]
367 gp.oldpath = line[10:]
347 elif line.startswith('copy to '):
368 elif line.startswith('copy to '):
348 gp.path = line[8:]
369 gp.path = line[8:]
349 elif line.startswith('deleted file'):
370 elif line.startswith('deleted file'):
350 gp.op = 'DELETE'
371 gp.op = 'DELETE'
351 elif line.startswith('new file mode '):
372 elif line.startswith('new file mode '):
352 gp.op = 'ADD'
373 gp.op = 'ADD'
353 gp.setmode(int(line[-6:], 8))
374 gp.setmode(int(line[-6:], 8))
354 elif line.startswith('new mode '):
375 elif line.startswith('new mode '):
355 gp.setmode(int(line[-6:], 8))
376 gp.setmode(int(line[-6:], 8))
356 elif line.startswith('GIT binary patch'):
377 elif line.startswith('GIT binary patch'):
357 gp.binary = True
378 gp.binary = True
358 if gp:
379 if gp:
359 gitpatches.append(gp)
380 gitpatches.append(gp)
360
381
361 return gitpatches
382 return gitpatches
362
383
363 class linereader(object):
384 class linereader(object):
364 # simple class to allow pushing lines back into the input stream
385 # simple class to allow pushing lines back into the input stream
365 def __init__(self, fp):
386 def __init__(self, fp):
366 self.fp = fp
387 self.fp = fp
367 self.buf = []
388 self.buf = []
368
389
369 def push(self, line):
390 def push(self, line):
370 if line is not None:
391 if line is not None:
371 self.buf.append(line)
392 self.buf.append(line)
372
393
373 def readline(self):
394 def readline(self):
374 if self.buf:
395 if self.buf:
375 l = self.buf[0]
396 l = self.buf[0]
376 del self.buf[0]
397 del self.buf[0]
377 return l
398 return l
378 return self.fp.readline()
399 return self.fp.readline()
379
400
380 def __iter__(self):
401 def __iter__(self):
381 while True:
402 while True:
382 l = self.readline()
403 l = self.readline()
383 if not l:
404 if not l:
384 break
405 break
385 yield l
406 yield l
386
407
387 class abstractbackend(object):
408 class abstractbackend(object):
388 def __init__(self, ui):
409 def __init__(self, ui):
389 self.ui = ui
410 self.ui = ui
390
411
391 def getfile(self, fname):
412 def getfile(self, fname):
392 """Return target file data and flags as a (data, (islink,
413 """Return target file data and flags as a (data, (islink,
393 isexec)) tuple. Data is None if file is missing/deleted.
414 isexec)) tuple. Data is None if file is missing/deleted.
394 """
415 """
395 raise NotImplementedError
416 raise NotImplementedError
396
417
397 def setfile(self, fname, data, mode, copysource):
418 def setfile(self, fname, data, mode, copysource):
398 """Write data to target file fname and set its mode. mode is a
419 """Write data to target file fname and set its mode. mode is a
399 (islink, isexec) tuple. If data is None, the file content should
420 (islink, isexec) tuple. If data is None, the file content should
400 be left unchanged. If the file is modified after being copied,
421 be left unchanged. If the file is modified after being copied,
401 copysource is set to the original file name.
422 copysource is set to the original file name.
402 """
423 """
403 raise NotImplementedError
424 raise NotImplementedError
404
425
405 def unlink(self, fname):
426 def unlink(self, fname):
406 """Unlink target file."""
427 """Unlink target file."""
407 raise NotImplementedError
428 raise NotImplementedError
408
429
409 def writerej(self, fname, failed, total, lines):
430 def writerej(self, fname, failed, total, lines):
410 """Write rejected lines for fname. total is the number of hunks
431 """Write rejected lines for fname. total is the number of hunks
411 which failed to apply and total the total number of hunks for this
432 which failed to apply and total the total number of hunks for this
412 files.
433 files.
413 """
434 """
414 pass
435 pass
415
436
416 def exists(self, fname):
437 def exists(self, fname):
417 raise NotImplementedError
438 raise NotImplementedError
418
439
419 class fsbackend(abstractbackend):
440 class fsbackend(abstractbackend):
420 def __init__(self, ui, basedir):
441 def __init__(self, ui, basedir):
421 super(fsbackend, self).__init__(ui)
442 super(fsbackend, self).__init__(ui)
422 self.opener = scmutil.opener(basedir)
443 self.opener = scmutil.opener(basedir)
423
444
424 def _join(self, f):
445 def _join(self, f):
425 return os.path.join(self.opener.base, f)
446 return os.path.join(self.opener.base, f)
426
447
427 def getfile(self, fname):
448 def getfile(self, fname):
428 if self.opener.islink(fname):
449 if self.opener.islink(fname):
429 return (self.opener.readlink(fname), (True, False))
450 return (self.opener.readlink(fname), (True, False))
430
451
431 isexec = False
452 isexec = False
432 try:
453 try:
433 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
454 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
434 except OSError as e:
455 except OSError as e:
435 if e.errno != errno.ENOENT:
456 if e.errno != errno.ENOENT:
436 raise
457 raise
437 try:
458 try:
438 return (self.opener.read(fname), (False, isexec))
459 return (self.opener.read(fname), (False, isexec))
439 except IOError as e:
460 except IOError as e:
440 if e.errno != errno.ENOENT:
461 if e.errno != errno.ENOENT:
441 raise
462 raise
442 return None, None
463 return None, None
443
464
444 def setfile(self, fname, data, mode, copysource):
465 def setfile(self, fname, data, mode, copysource):
445 islink, isexec = mode
466 islink, isexec = mode
446 if data is None:
467 if data is None:
447 self.opener.setflags(fname, islink, isexec)
468 self.opener.setflags(fname, islink, isexec)
448 return
469 return
449 if islink:
470 if islink:
450 self.opener.symlink(data, fname)
471 self.opener.symlink(data, fname)
451 else:
472 else:
452 self.opener.write(fname, data)
473 self.opener.write(fname, data)
453 if isexec:
474 if isexec:
454 self.opener.setflags(fname, False, True)
475 self.opener.setflags(fname, False, True)
455
476
456 def unlink(self, fname):
477 def unlink(self, fname):
457 self.opener.unlinkpath(fname, ignoremissing=True)
478 self.opener.unlinkpath(fname, ignoremissing=True)
458
479
459 def writerej(self, fname, failed, total, lines):
480 def writerej(self, fname, failed, total, lines):
460 fname = fname + ".rej"
481 fname = fname + ".rej"
461 self.ui.warn(
482 self.ui.warn(
462 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
483 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
463 (failed, total, fname))
484 (failed, total, fname))
464 fp = self.opener(fname, 'w')
485 fp = self.opener(fname, 'w')
465 fp.writelines(lines)
486 fp.writelines(lines)
466 fp.close()
487 fp.close()
467
488
468 def exists(self, fname):
489 def exists(self, fname):
469 return self.opener.lexists(fname)
490 return self.opener.lexists(fname)
470
491
471 class workingbackend(fsbackend):
492 class workingbackend(fsbackend):
472 def __init__(self, ui, repo, similarity):
493 def __init__(self, ui, repo, similarity):
473 super(workingbackend, self).__init__(ui, repo.root)
494 super(workingbackend, self).__init__(ui, repo.root)
474 self.repo = repo
495 self.repo = repo
475 self.similarity = similarity
496 self.similarity = similarity
476 self.removed = set()
497 self.removed = set()
477 self.changed = set()
498 self.changed = set()
478 self.copied = []
499 self.copied = []
479
500
480 def _checkknown(self, fname):
501 def _checkknown(self, fname):
481 if self.repo.dirstate[fname] == '?' and self.exists(fname):
502 if self.repo.dirstate[fname] == '?' and self.exists(fname):
482 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
503 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
483
504
484 def setfile(self, fname, data, mode, copysource):
505 def setfile(self, fname, data, mode, copysource):
485 self._checkknown(fname)
506 self._checkknown(fname)
486 super(workingbackend, self).setfile(fname, data, mode, copysource)
507 super(workingbackend, self).setfile(fname, data, mode, copysource)
487 if copysource is not None:
508 if copysource is not None:
488 self.copied.append((copysource, fname))
509 self.copied.append((copysource, fname))
489 self.changed.add(fname)
510 self.changed.add(fname)
490
511
491 def unlink(self, fname):
512 def unlink(self, fname):
492 self._checkknown(fname)
513 self._checkknown(fname)
493 super(workingbackend, self).unlink(fname)
514 super(workingbackend, self).unlink(fname)
494 self.removed.add(fname)
515 self.removed.add(fname)
495 self.changed.add(fname)
516 self.changed.add(fname)
496
517
497 def close(self):
518 def close(self):
498 wctx = self.repo[None]
519 wctx = self.repo[None]
499 changed = set(self.changed)
520 changed = set(self.changed)
500 for src, dst in self.copied:
521 for src, dst in self.copied:
501 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
522 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
502 if self.removed:
523 if self.removed:
503 wctx.forget(sorted(self.removed))
524 wctx.forget(sorted(self.removed))
504 for f in self.removed:
525 for f in self.removed:
505 if f not in self.repo.dirstate:
526 if f not in self.repo.dirstate:
506 # File was deleted and no longer belongs to the
527 # File was deleted and no longer belongs to the
507 # dirstate, it was probably marked added then
528 # dirstate, it was probably marked added then
508 # deleted, and should not be considered by
529 # deleted, and should not be considered by
509 # marktouched().
530 # marktouched().
510 changed.discard(f)
531 changed.discard(f)
511 if changed:
532 if changed:
512 scmutil.marktouched(self.repo, changed, self.similarity)
533 scmutil.marktouched(self.repo, changed, self.similarity)
513 return sorted(self.changed)
534 return sorted(self.changed)
514
535
515 class filestore(object):
536 class filestore(object):
516 def __init__(self, maxsize=None):
537 def __init__(self, maxsize=None):
517 self.opener = None
538 self.opener = None
518 self.files = {}
539 self.files = {}
519 self.created = 0
540 self.created = 0
520 self.maxsize = maxsize
541 self.maxsize = maxsize
521 if self.maxsize is None:
542 if self.maxsize is None:
522 self.maxsize = 4*(2**20)
543 self.maxsize = 4*(2**20)
523 self.size = 0
544 self.size = 0
524 self.data = {}
545 self.data = {}
525
546
526 def setfile(self, fname, data, mode, copied=None):
547 def setfile(self, fname, data, mode, copied=None):
527 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
548 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
528 self.data[fname] = (data, mode, copied)
549 self.data[fname] = (data, mode, copied)
529 self.size += len(data)
550 self.size += len(data)
530 else:
551 else:
531 if self.opener is None:
552 if self.opener is None:
532 root = tempfile.mkdtemp(prefix='hg-patch-')
553 root = tempfile.mkdtemp(prefix='hg-patch-')
533 self.opener = scmutil.opener(root)
554 self.opener = scmutil.opener(root)
534 # Avoid filename issues with these simple names
555 # Avoid filename issues with these simple names
535 fn = str(self.created)
556 fn = str(self.created)
536 self.opener.write(fn, data)
557 self.opener.write(fn, data)
537 self.created += 1
558 self.created += 1
538 self.files[fname] = (fn, mode, copied)
559 self.files[fname] = (fn, mode, copied)
539
560
540 def getfile(self, fname):
561 def getfile(self, fname):
541 if fname in self.data:
562 if fname in self.data:
542 return self.data[fname]
563 return self.data[fname]
543 if not self.opener or fname not in self.files:
564 if not self.opener or fname not in self.files:
544 return None, None, None
565 return None, None, None
545 fn, mode, copied = self.files[fname]
566 fn, mode, copied = self.files[fname]
546 return self.opener.read(fn), mode, copied
567 return self.opener.read(fn), mode, copied
547
568
548 def close(self):
569 def close(self):
549 if self.opener:
570 if self.opener:
550 shutil.rmtree(self.opener.base)
571 shutil.rmtree(self.opener.base)
551
572
class repobackend(abstractbackend):
    """Patch backend that records changes against a changectx.

    Instead of touching the working directory, patched file contents are
    written into ``store`` (an object with a ``setfile()`` method) and the
    names of touched/removed files are reported by close().
    """
    def __init__(self, ui, repo, ctx, store):
        super(repobackend, self).__init__(ui)
        self.repo = repo
        self.ctx = ctx          # base context the patch is applied against
        self.store = store      # receives new file contents via setfile()
        self.changed = set()    # files written through setfile()
        self.removed = set()    # files deleted through unlink()
        self.copied = {}        # copy destination -> copy source

    def _checkknown(self, fname):
        # Refuse to patch files absent from the base context.
        if fname not in self.ctx:
            raise PatchError(_('cannot patch %s: file is not tracked') % fname)

    def getfile(self, fname):
        """Return (data, (islink, isexec)) for fname, or (None, None)
        when the file does not exist in the base context."""
        try:
            fctx = self.ctx[fname]
        except error.LookupError:
            return None, None
        flags = fctx.flags()
        return fctx.data(), ('l' in flags, 'x' in flags)

    def setfile(self, fname, data, mode, copysource):
        """Record new content/mode for ``fname`` in the store.

        ``data`` may be None for a mode-only change, in which case the
        current content from the base context is reused. ``copysource``,
        if given, must be tracked in the base context.
        """
        if copysource:
            self._checkknown(copysource)
        if data is None:
            data = self.ctx[fname].data()
        self.store.setfile(fname, data, mode, copysource)
        self.changed.add(fname)
        if copysource:
            self.copied[fname] = copysource

    def unlink(self, fname):
        # Only marks the file as removed; consumers of close() act on it.
        self._checkknown(fname)
        self.removed.add(fname)

    def exists(self, fname):
        return fname in self.ctx

    def close(self):
        """Return the set of all files touched by the applied patch."""
        return self.changed | self.removed
593
614
# Hunk descriptors:
# @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
# Raw strings: the patterns contain regex escapes (\d, \+, \*) which must
# not be interpreted as string escapes (invalid-escape warnings on py3.6+).
unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
contextdesc = re.compile(r'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
# Accepted values for the --eol style of patch application.
eolmodes = ['strict', 'crlf', 'lf', 'auto']
598
619
class patchfile(object):
    """Applies hunks to a single target file, tracking offsets and rejects.

    Content is read through ``backend`` (or ``store`` for copy/rename
    sources), patched in memory line by line, and written back via the
    backend on close(). Hunks that cannot be placed — even with fuzz —
    are collected in ``self.rej`` and emitted as a reject file.
    """
    def __init__(self, ui, gp, backend, store, eolmode='strict'):
        self.fname = gp.path
        self.eolmode = eolmode
        self.eol = None  # EOL style detected from the file's first line
        self.backend = backend
        self.ui = ui
        self.lines = []
        self.exists = False
        self.missing = True
        self.mode = gp.mode
        self.copysource = gp.oldpath
        self.create = gp.op in ('ADD', 'COPY', 'RENAME')
        self.remove = gp.op == 'DELETE'
        # Copies/renames read the source content from the store; plain
        # patches fetch the current content from the backend.
        if self.copysource is None:
            data, mode = backend.getfile(self.fname)
        else:
            data, mode = store.getfile(self.copysource)[:2]
        if data is not None:
            self.exists = self.copysource is None or backend.exists(self.fname)
            self.missing = False
            if data:
                self.lines = mdiff.splitnewlines(data)
            if self.mode is None:
                self.mode = mode
            if self.lines:
                # Normalize line endings
                if self.lines[0].endswith('\r\n'):
                    self.eol = '\r\n'
                elif self.lines[0].endswith('\n'):
                    self.eol = '\n'
                if eolmode != 'strict':
                    nlines = []
                    for l in self.lines:
                        if l.endswith('\r\n'):
                            l = l[:-2] + '\n'
                        nlines.append(l)
                    self.lines = nlines
        else:
            if self.create:
                self.missing = False
            if self.mode is None:
                self.mode = (False, False)
        if self.missing:
            self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)

        self.hash = {}       # line content -> [indexes]; built lazily in apply()
        self.dirty = 0       # set once any hunk modified self.lines
        self.offset = 0      # cumulative line-count delta from applied hunks
        self.skew = 0        # drift between expected and actual hunk position
        self.rej = []        # hunks that failed to apply
        self.fileprinted = False
        self.printfile(False)
        self.hunks = 0

    def writelines(self, fname, lines, mode):
        """Write ``lines`` to ``fname`` via the backend, restoring the
        EOL style dictated by ``self.eolmode``."""
        if self.eolmode == 'auto':
            eol = self.eol
        elif self.eolmode == 'crlf':
            eol = '\r\n'
        else:
            eol = '\n'

        if self.eolmode != 'strict' and eol and eol != '\n':
            rawlines = []
            for l in lines:
                if l and l[-1] == '\n':
                    l = l[:-1] + eol
                rawlines.append(l)
            lines = rawlines

        self.backend.setfile(fname, ''.join(lines), mode, self.copysource)

    def printfile(self, warn):
        """Announce "patching file X" once, as a warning or a note."""
        if self.fileprinted:
            return
        if warn or self.ui.verbose:
            self.fileprinted = True
        s = _("patching file %s\n") % self.fname
        if warn:
            self.ui.warn(s)
        else:
            self.ui.note(s)

    def findlines(self, l, linenum):
        # looks through the hash and finds candidate lines. The
        # result is a list of line numbers sorted based on distance
        # from linenum
        cand = self.hash.get(l, [])
        if len(cand) > 1:
            # resort our list of potentials forward then back.
            cand.sort(key=lambda x: abs(x - linenum))
        return cand

    def write_rej(self):
        # our rejects are a little different from patch(1). This always
        # creates rejects in the same form as the original patch. A file
        # header is inserted so that you can run the reject through patch again
        # without having to type the filename.
        if not self.rej:
            return
        base = os.path.basename(self.fname)
        lines = ["--- %s\n+++ %s\n" % (base, base)]
        for x in self.rej:
            for l in x.hunk:
                lines.append(l)
                if l[-1] != '\n':
                    lines.append("\n\ No newline at end of file\n")
        self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)

    def apply(self, h):
        """Apply hunk ``h`` to this file.

        Returns 0 on a clean apply, the fuzz amount used when the hunk
        only applied with fuzz, and -1 when the hunk was rejected.
        """
        if not h.complete():
            raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
                             (h.number, h.desc, len(h.a), h.lena, len(h.b),
                              h.lenb))

        self.hunks += 1

        if self.missing:
            self.rej.append(h)
            return -1

        if self.exists and self.create:
            if self.copysource:
                self.ui.warn(_("cannot create %s: destination already "
                               "exists\n") % self.fname)
            else:
                self.ui.warn(_("file %s already exists\n") % self.fname)
            self.rej.append(h)
            return -1

        # Binary hunks replace (or remove) the file wholesale.
        if isinstance(h, binhunk):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                l = h.new(self.lines)
                self.lines[:] = l
                self.offset += len(l)
                self.dirty = True
            return 0

        horig = h
        if (self.eolmode in ('crlf', 'lf')
            or self.eolmode == 'auto' and self.eol):
            # If new eols are going to be normalized, then normalize
            # hunk data before patching. Otherwise, preserve input
            # line-endings.
            h = h.getnormalized()

        # fast case first, no offsets, no fuzz
        old, oldstart, new, newstart = h.fuzzit(0, False)
        oldstart += self.offset
        orig_start = oldstart
        # if there's skew we want to emit the "(offset %d lines)" even
        # when the hunk cleanly applies at start + skew, so skip the
        # fast case code
        if (self.skew == 0 and
            diffhelpers.testhunk(old, self.lines, oldstart) == 0):
            if self.remove:
                self.backend.unlink(self.fname)
            else:
                self.lines[oldstart:oldstart + len(old)] = new
                self.offset += len(new) - len(old)
                self.dirty = True
            return 0

        # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
        self.hash = {}
        for x, s in enumerate(self.lines):
            self.hash.setdefault(s, []).append(x)

        # Try increasing fuzz levels; at each level, first trim context
        # only from the top of the hunk, then from both ends.
        for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
            for toponly in [True, False]:
                old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
                oldstart = oldstart + self.offset + self.skew
                oldstart = min(oldstart, len(self.lines))
                if old:
                    cand = self.findlines(old[0][1:], oldstart)
                else:
                    # Only adding lines with no or fuzzed context, just
                    # take the skew in account
                    cand = [oldstart]

                for l in cand:
                    if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
                        self.lines[l : l + len(old)] = new
                        self.offset += len(new) - len(old)
                        self.skew = l - orig_start
                        self.dirty = True
                        offset = l - orig_start - fuzzlen
                        if fuzzlen:
                            msg = _("Hunk #%d succeeded at %d "
                                    "with fuzz %d "
                                    "(offset %d lines).\n")
                            self.printfile(True)
                            self.ui.warn(msg %
                                (h.number, l + 1, fuzzlen, offset))
                        else:
                            msg = _("Hunk #%d succeeded at %d "
                                    "(offset %d lines).\n")
                            self.ui.note(msg % (h.number, l + 1, offset))
                        return fuzzlen
        self.printfile(True)
        self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
        self.rej.append(horig)
        return -1

    def close(self):
        """Flush patched content and rejects; return the reject count."""
        if self.dirty:
            self.writelines(self.fname, self.lines, self.mode)
        self.write_rej()
        return len(self.rej)
813
834
class header(object):
    """One file header of a parsed patch, together with its hunks."""
    diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
    diff_re = re.compile('diff -r .* (.*)$')
    allhunks_re = re.compile('(?:index|deleted file) ')
    pretty_re = re.compile('(?:new file|deleted file) ')
    special_re = re.compile('(?:index|deleted|copy|rename) ')
    newfile_re = re.compile('(?:new file)')

    def __init__(self, header):
        self.header = header
        self.hunks = []

    def binary(self):
        # A git binary patch always carries an 'index ' header line.
        for line in self.header:
            if line.startswith('index '):
                return True
        return False

    def pretty(self, fp):
        """Write a condensed, human-oriented rendering of the header."""
        for line in self.header:
            if line.startswith('index '):
                fp.write(_('this modifies a binary file (all or nothing)\n'))
                break
            if self.pretty_re.match(line):
                fp.write(line)
                if self.binary():
                    fp.write(_('this is a binary file\n'))
                break
            if line.startswith('---'):
                fp.write(_('%d hunks, %d lines changed\n') %
                         (len(self.hunks),
                          sum([max(h.added, h.removed) for h in self.hunks])))
                break
            fp.write(line)

    def write(self, fp):
        fp.write(''.join(self.header))

    def allhunks(self):
        # True when all hunks must be taken or left as a unit.
        return any(self.allhunks_re.match(line) for line in self.header)

    def files(self):
        """Return the file name(s) this header refers to.

        Git headers yield [file] or [fromfile, tofile]; plain headers
        yield the regex groups of the 'diff -r' line.
        """
        m = self.diffgit_re.match(self.header[0])
        if not m:
            return self.diff_re.match(self.header[0]).groups()
        fromfile, tofile = m.groups()
        if fromfile == tofile:
            return [fromfile]
        return [fromfile, tofile]

    def filename(self):
        return self.files()[-1]

    def __repr__(self):
        return '<header %s>' % (' '.join(map(repr, self.files())))

    def isnewfile(self):
        return any(self.newfile_re.match(line) for line in self.header)

    def special(self):
        # Special files are shown only at the header level and not at the hunk
        # level for example a file that has been deleted is a special file.
        # The user cannot change the content of the operation, in the case of
        # the deleted file he has to take the deletion or not take it, he
        # cannot take some of it.
        # Newly added files are special if they are empty, they are not special
        # if they have some content as we want to be able to change it
        nocontent = len(self.header) == 2
        if self.isnewfile() and nocontent:
            return True
        return any(self.special_re.match(line) for line in self.header)
885
906
class recordhunk(object):
    """patch hunk

    XXX shouldn't we merge this with the other hunk class?
    """
    maxcontext = 3

    def __init__(self, header, fromline, toline, proc, before, hunk, after):
        def trimcontext(lineno, lines):
            # Context trimming is currently disabled (note the 'False'
            # guard), so this returns its arguments unchanged.
            excess = len(lines) - self.maxcontext
            if False and excess > 0:
                return lineno + excess, lines[:self.maxcontext]
            return lineno, lines

        self.header = header
        self.fromline, self.before = trimcontext(fromline, before)
        self.toline, self.after = trimcontext(toline, after)
        self.proc = proc
        self.hunk = hunk
        self.added, self.removed = self.countchanges(self.hunk)

    def __eq__(self, other):
        return (isinstance(other, recordhunk)
                and other.hunk == self.hunk
                and other.proc == self.proc
                and self.fromline == other.fromline
                and self.header.files() == other.header.files())

    def __hash__(self):
        return hash((tuple(self.hunk),
                     tuple(self.header.files()),
                     self.fromline,
                     self.proc))

    def countchanges(self, hunk):
        """hunk -> (n+,n-)"""
        added = sum(1 for line in hunk if line[0] == '+')
        removed = sum(1 for line in hunk if line[0] == '-')
        return added, removed

    def write(self, fp):
        """Emit this hunk in unified-diff form."""
        delta = len(self.before) + len(self.after)
        if self.after and self.after[-1] == '\\ No newline at end of file\n':
            delta -= 1
        fromlen = delta + self.removed
        tolen = delta + self.added
        fp.write('@@ -%d,%d +%d,%d @@%s\n' %
                 (self.fromline, fromlen, self.toline, tolen,
                  self.proc and (' ' + self.proc)))
        fp.write(''.join(self.before + self.hunk + self.after))

    pretty = write

    def filename(self):
        return self.header.filename()

    def __repr__(self):
        return '<hunk %r@%d>' % (self.filename(), self.fromline)
946
967
def filterpatch(ui, headers, operation=None):
    """Interactively filter patch chunks into applied-only chunks

    Prompts once per file header and once per hunk, and returns the flat
    list of accepted headers/hunks suitable for re-applying.
    """
    if operation is None:
        operation = _('record')
    # NOTE(review): 'operation' is computed but not referenced below in
    # this function body — presumably reserved for prompt wording; verify.

    def prompt(skipfile, skipall, query, chunk):
        """prompt query, and process base inputs

        - y/n for the rest of file
        - y/n for the rest
        - ? (help)
        - q (quit)

        Return True/False and possibly updated skipfile and skipall.
        """
        newpatches = None
        # Sticky answers: a prior "all"/"file" decision short-circuits.
        if skipall is not None:
            return skipall, skipfile, skipall, newpatches
        if skipfile is not None:
            return skipfile, skipfile, skipall, newpatches
        while True:
            resps = _('[Ynesfdaq?]'
                      '$$ &Yes, record this change'
                      '$$ &No, skip this change'
                      '$$ &Edit this change manually'
                      '$$ &Skip remaining changes to this file'
                      '$$ Record remaining changes to this &file'
                      '$$ &Done, skip remaining changes and files'
                      '$$ Record &all changes to all remaining files'
                      '$$ &Quit, recording no changes'
                      '$$ &? (display help)')
            r = ui.promptchoice("%s %s" % (query, resps))
            ui.write("\n")
            if r == 8: # ?
                for c, t in ui.extractchoices(resps)[1]:
                    ui.write('%s - %s\n' % (c, t.lower()))
                continue
            elif r == 0: # yes
                ret = True
            elif r == 1: # no
                ret = False
            elif r == 2: # Edit patch
                if chunk is None:
                    ui.write(_('cannot edit patch for whole file'))
                    ui.write("\n")
                    continue
                if chunk.header.binary():
                    ui.write(_('cannot edit patch for binary file'))
                    ui.write("\n")
                    continue
                # Patch comment based on the Git one (based on comment at end of
                # https://mercurial-scm.org/wiki/RecordExtension)
                phelp = '---' + _("""
To remove '-' lines, make them ' ' lines (context).
To remove '+' lines, delete them.
Lines starting with # will be removed from the patch.

If the patch applies cleanly, the edited hunk will immediately be
added to the record list. If it does not apply cleanly, a rejects
file will be generated: you can use that when you try again. If
all lines of the hunk are removed, then the edit is aborted and
the hunk is left unchanged.
""")
                (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
                        suffix=".diff", text=True)
                ncpatchfp = None
                try:
                    # Write the initial patch
                    f = os.fdopen(patchfd, "w")
                    chunk.header.write(f)
                    chunk.write(f)
                    f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
                    f.close()
                    # Start the editor and wait for it to complete
                    editor = ui.geteditor()
                    ret = ui.system("%s \"%s\"" % (editor, patchfn),
                                    environ={'HGUSER': ui.username()})
                    if ret != 0:
                        ui.warn(_("editor exited with exit code %d\n") % ret)
                        continue
                    # Remove comment lines
                    patchfp = open(patchfn)
                    ncpatchfp = cStringIO.StringIO()
                    for line in patchfp:
                        if not line.startswith('#'):
                            ncpatchfp.write(line)
                    patchfp.close()
                    ncpatchfp.seek(0)
                    newpatches = parsepatch(ncpatchfp)
                finally:
                    os.unlink(patchfn)
                    del ncpatchfp
                # Signal that the chunk shouldn't be applied as-is, but
                # provide the new patch to be used instead.
                ret = False
            elif r == 3: # Skip
                ret = skipfile = False
            elif r == 4: # file (Record remaining)
                ret = skipfile = True
            elif r == 5: # done, skip remaining
                ret = skipall = False
            elif r == 6: # all
                ret = skipall = True
            elif r == 7: # quit
                raise util.Abort(_('user quit'))
            return ret, skipfile, skipall, newpatches

    seen = set()
    applied = {} # 'filename' -> [] of chunks
    skipfile, skipall = None, None
    pos, total = 1, sum(len(h.hunks) for h in headers)
    for h in headers:
        pos += len(h.hunks)
        skipfile = None
        fixoffset = 0
        hdr = ''.join(h.header)
        if hdr in seen:
            # Duplicate header (same file appearing twice): skip it.
            continue
        seen.add(hdr)
        if skipall is None:
            h.pretty(ui)
            msg = (_('examine changes to %s?') %
                   _(' and ').join("'%s'" % f for f in h.files()))
            r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
            if not r:
                continue
        applied[h.filename()] = [h]
        if h.allhunks():
            # All-or-nothing header: take every hunk without prompting.
            applied[h.filename()] += h.hunks
            continue
        for i, chunk in enumerate(h.hunks):
            if skipfile is None and skipall is None:
                chunk.pretty(ui)
            if total == 1:
                msg = _("record this change to '%s'?") % chunk.filename()
            else:
                idx = pos - len(h.hunks) + i
                msg = _("record change %d/%d to '%s'?") % (idx, total,
                                                           chunk.filename())
            r, skipfile, skipall, newpatches = prompt(skipfile,
                                                      skipall, msg, chunk)
            if r:
                if fixoffset:
                    # Earlier skipped chunks shifted line numbers; clone
                    # the chunk before adjusting so the original survives.
                    chunk = copy.copy(chunk)
                    chunk.toline += fixoffset
                applied[chunk.filename()].append(chunk)
            elif newpatches is not None:
                # The user edited the hunk: record the replacement hunks.
                for newpatch in newpatches:
                    for newhunk in newpatch.hunks:
                        if fixoffset:
                            newhunk.toline += fixoffset
                        applied[newhunk.filename()].append(newhunk)
            else:
                # Chunk skipped: subsequent chunks must compensate.
                fixoffset += chunk.removed - chunk.added
    # Keep only files where something beyond the bare header was taken
    # (or whose header is special, e.g. a deletion).
    return sum([h for h in applied.itervalues()
                if h[0].special() or len(h) > 1], [])
1103 class hunk(object):
1124 class hunk(object):
1104 def __init__(self, desc, num, lr, context):
1125 def __init__(self, desc, num, lr, context):
1105 self.number = num
1126 self.number = num
1106 self.desc = desc
1127 self.desc = desc
1107 self.hunk = [desc]
1128 self.hunk = [desc]
1108 self.a = []
1129 self.a = []
1109 self.b = []
1130 self.b = []
1110 self.starta = self.lena = None
1131 self.starta = self.lena = None
1111 self.startb = self.lenb = None
1132 self.startb = self.lenb = None
1112 if lr is not None:
1133 if lr is not None:
1113 if context:
1134 if context:
1114 self.read_context_hunk(lr)
1135 self.read_context_hunk(lr)
1115 else:
1136 else:
1116 self.read_unified_hunk(lr)
1137 self.read_unified_hunk(lr)
1117
1138
1118 def getnormalized(self):
1139 def getnormalized(self):
1119 """Return a copy with line endings normalized to LF."""
1140 """Return a copy with line endings normalized to LF."""
1120
1141
1121 def normalize(lines):
1142 def normalize(lines):
1122 nlines = []
1143 nlines = []
1123 for line in lines:
1144 for line in lines:
1124 if line.endswith('\r\n'):
1145 if line.endswith('\r\n'):
1125 line = line[:-2] + '\n'
1146 line = line[:-2] + '\n'
1126 nlines.append(line)
1147 nlines.append(line)
1127 return nlines
1148 return nlines
1128
1149
1129 # Dummy object, it is rebuilt manually
1150 # Dummy object, it is rebuilt manually
1130 nh = hunk(self.desc, self.number, None, None)
1151 nh = hunk(self.desc, self.number, None, None)
1131 nh.number = self.number
1152 nh.number = self.number
1132 nh.desc = self.desc
1153 nh.desc = self.desc
1133 nh.hunk = self.hunk
1154 nh.hunk = self.hunk
1134 nh.a = normalize(self.a)
1155 nh.a = normalize(self.a)
1135 nh.b = normalize(self.b)
1156 nh.b = normalize(self.b)
1136 nh.starta = self.starta
1157 nh.starta = self.starta
1137 nh.startb = self.startb
1158 nh.startb = self.startb
1138 nh.lena = self.lena
1159 nh.lena = self.lena
1139 nh.lenb = self.lenb
1160 nh.lenb = self.lenb
1140 return nh
1161 return nh
1141
1162
1142 def read_unified_hunk(self, lr):
1163 def read_unified_hunk(self, lr):
1143 m = unidesc.match(self.desc)
1164 m = unidesc.match(self.desc)
1144 if not m:
1165 if not m:
1145 raise PatchError(_("bad hunk #%d") % self.number)
1166 raise PatchError(_("bad hunk #%d") % self.number)
1146 self.starta, self.lena, self.startb, self.lenb = m.groups()
1167 self.starta, self.lena, self.startb, self.lenb = m.groups()
1147 if self.lena is None:
1168 if self.lena is None:
1148 self.lena = 1
1169 self.lena = 1
1149 else:
1170 else:
1150 self.lena = int(self.lena)
1171 self.lena = int(self.lena)
1151 if self.lenb is None:
1172 if self.lenb is None:
1152 self.lenb = 1
1173 self.lenb = 1
1153 else:
1174 else:
1154 self.lenb = int(self.lenb)
1175 self.lenb = int(self.lenb)
1155 self.starta = int(self.starta)
1176 self.starta = int(self.starta)
1156 self.startb = int(self.startb)
1177 self.startb = int(self.startb)
1157 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1178 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1158 self.b)
1179 self.b)
1159 # if we hit eof before finishing out the hunk, the last line will
1180 # if we hit eof before finishing out the hunk, the last line will
1160 # be zero length. Lets try to fix it up.
1181 # be zero length. Lets try to fix it up.
1161 while len(self.hunk[-1]) == 0:
1182 while len(self.hunk[-1]) == 0:
1162 del self.hunk[-1]
1183 del self.hunk[-1]
1163 del self.a[-1]
1184 del self.a[-1]
1164 del self.b[-1]
1185 del self.b[-1]
1165 self.lena -= 1
1186 self.lena -= 1
1166 self.lenb -= 1
1187 self.lenb -= 1
1167 self._fixnewline(lr)
1188 self._fixnewline(lr)
1168
1189
1169 def read_context_hunk(self, lr):
1190 def read_context_hunk(self, lr):
1170 self.desc = lr.readline()
1191 self.desc = lr.readline()
1171 m = contextdesc.match(self.desc)
1192 m = contextdesc.match(self.desc)
1172 if not m:
1193 if not m:
1173 raise PatchError(_("bad hunk #%d") % self.number)
1194 raise PatchError(_("bad hunk #%d") % self.number)
1174 self.starta, aend = m.groups()
1195 self.starta, aend = m.groups()
1175 self.starta = int(self.starta)
1196 self.starta = int(self.starta)
1176 if aend is None:
1197 if aend is None:
1177 aend = self.starta
1198 aend = self.starta
1178 self.lena = int(aend) - self.starta
1199 self.lena = int(aend) - self.starta
1179 if self.starta:
1200 if self.starta:
1180 self.lena += 1
1201 self.lena += 1
1181 for x in xrange(self.lena):
1202 for x in xrange(self.lena):
1182 l = lr.readline()
1203 l = lr.readline()
1183 if l.startswith('---'):
1204 if l.startswith('---'):
1184 # lines addition, old block is empty
1205 # lines addition, old block is empty
1185 lr.push(l)
1206 lr.push(l)
1186 break
1207 break
1187 s = l[2:]
1208 s = l[2:]
1188 if l.startswith('- ') or l.startswith('! '):
1209 if l.startswith('- ') or l.startswith('! '):
1189 u = '-' + s
1210 u = '-' + s
1190 elif l.startswith(' '):
1211 elif l.startswith(' '):
1191 u = ' ' + s
1212 u = ' ' + s
1192 else:
1213 else:
1193 raise PatchError(_("bad hunk #%d old text line %d") %
1214 raise PatchError(_("bad hunk #%d old text line %d") %
1194 (self.number, x))
1215 (self.number, x))
1195 self.a.append(u)
1216 self.a.append(u)
1196 self.hunk.append(u)
1217 self.hunk.append(u)
1197
1218
1198 l = lr.readline()
1219 l = lr.readline()
1199 if l.startswith('\ '):
1220 if l.startswith('\ '):
1200 s = self.a[-1][:-1]
1221 s = self.a[-1][:-1]
1201 self.a[-1] = s
1222 self.a[-1] = s
1202 self.hunk[-1] = s
1223 self.hunk[-1] = s
1203 l = lr.readline()
1224 l = lr.readline()
1204 m = contextdesc.match(l)
1225 m = contextdesc.match(l)
1205 if not m:
1226 if not m:
1206 raise PatchError(_("bad hunk #%d") % self.number)
1227 raise PatchError(_("bad hunk #%d") % self.number)
1207 self.startb, bend = m.groups()
1228 self.startb, bend = m.groups()
1208 self.startb = int(self.startb)
1229 self.startb = int(self.startb)
1209 if bend is None:
1230 if bend is None:
1210 bend = self.startb
1231 bend = self.startb
1211 self.lenb = int(bend) - self.startb
1232 self.lenb = int(bend) - self.startb
1212 if self.startb:
1233 if self.startb:
1213 self.lenb += 1
1234 self.lenb += 1
1214 hunki = 1
1235 hunki = 1
1215 for x in xrange(self.lenb):
1236 for x in xrange(self.lenb):
1216 l = lr.readline()
1237 l = lr.readline()
1217 if l.startswith('\ '):
1238 if l.startswith('\ '):
1218 # XXX: the only way to hit this is with an invalid line range.
1239 # XXX: the only way to hit this is with an invalid line range.
1219 # The no-eol marker is not counted in the line range, but I
1240 # The no-eol marker is not counted in the line range, but I
1220 # guess there are diff(1) out there which behave differently.
1241 # guess there are diff(1) out there which behave differently.
1221 s = self.b[-1][:-1]
1242 s = self.b[-1][:-1]
1222 self.b[-1] = s
1243 self.b[-1] = s
1223 self.hunk[hunki - 1] = s
1244 self.hunk[hunki - 1] = s
1224 continue
1245 continue
1225 if not l:
1246 if not l:
1226 # line deletions, new block is empty and we hit EOF
1247 # line deletions, new block is empty and we hit EOF
1227 lr.push(l)
1248 lr.push(l)
1228 break
1249 break
1229 s = l[2:]
1250 s = l[2:]
1230 if l.startswith('+ ') or l.startswith('! '):
1251 if l.startswith('+ ') or l.startswith('! '):
1231 u = '+' + s
1252 u = '+' + s
1232 elif l.startswith(' '):
1253 elif l.startswith(' '):
1233 u = ' ' + s
1254 u = ' ' + s
1234 elif len(self.b) == 0:
1255 elif len(self.b) == 0:
1235 # line deletions, new block is empty
1256 # line deletions, new block is empty
1236 lr.push(l)
1257 lr.push(l)
1237 break
1258 break
1238 else:
1259 else:
1239 raise PatchError(_("bad hunk #%d old text line %d") %
1260 raise PatchError(_("bad hunk #%d old text line %d") %
1240 (self.number, x))
1261 (self.number, x))
1241 self.b.append(s)
1262 self.b.append(s)
1242 while True:
1263 while True:
1243 if hunki >= len(self.hunk):
1264 if hunki >= len(self.hunk):
1244 h = ""
1265 h = ""
1245 else:
1266 else:
1246 h = self.hunk[hunki]
1267 h = self.hunk[hunki]
1247 hunki += 1
1268 hunki += 1
1248 if h == u:
1269 if h == u:
1249 break
1270 break
1250 elif h.startswith('-'):
1271 elif h.startswith('-'):
1251 continue
1272 continue
1252 else:
1273 else:
1253 self.hunk.insert(hunki - 1, u)
1274 self.hunk.insert(hunki - 1, u)
1254 break
1275 break
1255
1276
1256 if not self.a:
1277 if not self.a:
1257 # this happens when lines were only added to the hunk
1278 # this happens when lines were only added to the hunk
1258 for x in self.hunk:
1279 for x in self.hunk:
1259 if x.startswith('-') or x.startswith(' '):
1280 if x.startswith('-') or x.startswith(' '):
1260 self.a.append(x)
1281 self.a.append(x)
1261 if not self.b:
1282 if not self.b:
1262 # this happens when lines were only deleted from the hunk
1283 # this happens when lines were only deleted from the hunk
1263 for x in self.hunk:
1284 for x in self.hunk:
1264 if x.startswith('+') or x.startswith(' '):
1285 if x.startswith('+') or x.startswith(' '):
1265 self.b.append(x[1:])
1286 self.b.append(x[1:])
1266 # @@ -start,len +start,len @@
1287 # @@ -start,len +start,len @@
1267 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1288 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1268 self.startb, self.lenb)
1289 self.startb, self.lenb)
1269 self.hunk[0] = self.desc
1290 self.hunk[0] = self.desc
1270 self._fixnewline(lr)
1291 self._fixnewline(lr)
1271
1292
1272 def _fixnewline(self, lr):
1293 def _fixnewline(self, lr):
1273 l = lr.readline()
1294 l = lr.readline()
1274 if l.startswith('\ '):
1295 if l.startswith('\ '):
1275 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1296 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1276 else:
1297 else:
1277 lr.push(l)
1298 lr.push(l)
1278
1299
1279 def complete(self):
1300 def complete(self):
1280 return len(self.a) == self.lena and len(self.b) == self.lenb
1301 return len(self.a) == self.lena and len(self.b) == self.lenb
1281
1302
1282 def _fuzzit(self, old, new, fuzz, toponly):
1303 def _fuzzit(self, old, new, fuzz, toponly):
1283 # this removes context lines from the top and bottom of list 'l'. It
1304 # this removes context lines from the top and bottom of list 'l'. It
1284 # checks the hunk to make sure only context lines are removed, and then
1305 # checks the hunk to make sure only context lines are removed, and then
1285 # returns a new shortened list of lines.
1306 # returns a new shortened list of lines.
1286 fuzz = min(fuzz, len(old))
1307 fuzz = min(fuzz, len(old))
1287 if fuzz:
1308 if fuzz:
1288 top = 0
1309 top = 0
1289 bot = 0
1310 bot = 0
1290 hlen = len(self.hunk)
1311 hlen = len(self.hunk)
1291 for x in xrange(hlen - 1):
1312 for x in xrange(hlen - 1):
1292 # the hunk starts with the @@ line, so use x+1
1313 # the hunk starts with the @@ line, so use x+1
1293 if self.hunk[x + 1][0] == ' ':
1314 if self.hunk[x + 1][0] == ' ':
1294 top += 1
1315 top += 1
1295 else:
1316 else:
1296 break
1317 break
1297 if not toponly:
1318 if not toponly:
1298 for x in xrange(hlen - 1):
1319 for x in xrange(hlen - 1):
1299 if self.hunk[hlen - bot - 1][0] == ' ':
1320 if self.hunk[hlen - bot - 1][0] == ' ':
1300 bot += 1
1321 bot += 1
1301 else:
1322 else:
1302 break
1323 break
1303
1324
1304 bot = min(fuzz, bot)
1325 bot = min(fuzz, bot)
1305 top = min(fuzz, top)
1326 top = min(fuzz, top)
1306 return old[top:len(old) - bot], new[top:len(new) - bot], top
1327 return old[top:len(old) - bot], new[top:len(new) - bot], top
1307 return old, new, 0
1328 return old, new, 0
1308
1329
1309 def fuzzit(self, fuzz, toponly):
1330 def fuzzit(self, fuzz, toponly):
1310 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1331 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1311 oldstart = self.starta + top
1332 oldstart = self.starta + top
1312 newstart = self.startb + top
1333 newstart = self.startb + top
1313 # zero length hunk ranges already have their start decremented
1334 # zero length hunk ranges already have their start decremented
1314 if self.lena and oldstart > 0:
1335 if self.lena and oldstart > 0:
1315 oldstart -= 1
1336 oldstart -= 1
1316 if self.lenb and newstart > 0:
1337 if self.lenb and newstart > 0:
1317 newstart -= 1
1338 newstart -= 1
1318 return old, oldstart, new, newstart
1339 return old, oldstart, new, newstart
1319
1340
1320 class binhunk(object):
1341 class binhunk(object):
1321 'A binary patch file.'
1342 'A binary patch file.'
1322 def __init__(self, lr, fname):
1343 def __init__(self, lr, fname):
1323 self.text = None
1344 self.text = None
1324 self.delta = False
1345 self.delta = False
1325 self.hunk = ['GIT binary patch\n']
1346 self.hunk = ['GIT binary patch\n']
1326 self._fname = fname
1347 self._fname = fname
1327 self._read(lr)
1348 self._read(lr)
1328
1349
1329 def complete(self):
1350 def complete(self):
1330 return self.text is not None
1351 return self.text is not None
1331
1352
1332 def new(self, lines):
1353 def new(self, lines):
1333 if self.delta:
1354 if self.delta:
1334 return [applybindelta(self.text, ''.join(lines))]
1355 return [applybindelta(self.text, ''.join(lines))]
1335 return [self.text]
1356 return [self.text]
1336
1357
1337 def _read(self, lr):
1358 def _read(self, lr):
1338 def getline(lr, hunk):
1359 def getline(lr, hunk):
1339 l = lr.readline()
1360 l = lr.readline()
1340 hunk.append(l)
1361 hunk.append(l)
1341 return l.rstrip('\r\n')
1362 return l.rstrip('\r\n')
1342
1363
1343 size = 0
1364 size = 0
1344 while True:
1365 while True:
1345 line = getline(lr, self.hunk)
1366 line = getline(lr, self.hunk)
1346 if not line:
1367 if not line:
1347 raise PatchError(_('could not extract "%s" binary data')
1368 raise PatchError(_('could not extract "%s" binary data')
1348 % self._fname)
1369 % self._fname)
1349 if line.startswith('literal '):
1370 if line.startswith('literal '):
1350 size = int(line[8:].rstrip())
1371 size = int(line[8:].rstrip())
1351 break
1372 break
1352 if line.startswith('delta '):
1373 if line.startswith('delta '):
1353 size = int(line[6:].rstrip())
1374 size = int(line[6:].rstrip())
1354 self.delta = True
1375 self.delta = True
1355 break
1376 break
1356 dec = []
1377 dec = []
1357 line = getline(lr, self.hunk)
1378 line = getline(lr, self.hunk)
1358 while len(line) > 1:
1379 while len(line) > 1:
1359 l = line[0]
1380 l = line[0]
1360 if l <= 'Z' and l >= 'A':
1381 if l <= 'Z' and l >= 'A':
1361 l = ord(l) - ord('A') + 1
1382 l = ord(l) - ord('A') + 1
1362 else:
1383 else:
1363 l = ord(l) - ord('a') + 27
1384 l = ord(l) - ord('a') + 27
1364 try:
1385 try:
1365 dec.append(base85.b85decode(line[1:])[:l])
1386 dec.append(base85.b85decode(line[1:])[:l])
1366 except ValueError as e:
1387 except ValueError as e:
1367 raise PatchError(_('could not decode "%s" binary patch: %s')
1388 raise PatchError(_('could not decode "%s" binary patch: %s')
1368 % (self._fname, str(e)))
1389 % (self._fname, str(e)))
1369 line = getline(lr, self.hunk)
1390 line = getline(lr, self.hunk)
1370 text = zlib.decompress(''.join(dec))
1391 text = zlib.decompress(''.join(dec))
1371 if len(text) != size:
1392 if len(text) != size:
1372 raise PatchError(_('"%s" length is %d bytes, should be %d')
1393 raise PatchError(_('"%s" length is %d bytes, should be %d')
1373 % (self._fname, len(text), size))
1394 % (self._fname, len(text), size))
1374 self.text = text
1395 self.text = text
1375
1396
1376 def parsefilename(str):
1397 def parsefilename(str):
1377 # --- filename \t|space stuff
1398 # --- filename \t|space stuff
1378 s = str[4:].rstrip('\r\n')
1399 s = str[4:].rstrip('\r\n')
1379 i = s.find('\t')
1400 i = s.find('\t')
1380 if i < 0:
1401 if i < 0:
1381 i = s.find(' ')
1402 i = s.find(' ')
1382 if i < 0:
1403 if i < 0:
1383 return s
1404 return s
1384 return s[:i]
1405 return s[:i]
1385
1406
1386 def reversehunks(hunks):
1407 def reversehunks(hunks):
1387 '''reverse the signs in the hunks given as argument
1408 '''reverse the signs in the hunks given as argument
1388
1409
1389 This function operates on hunks coming out of patch.filterpatch, that is
1410 This function operates on hunks coming out of patch.filterpatch, that is
1390 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1411 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1391
1412
1392 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1413 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1393 ... --- a/folder1/g
1414 ... --- a/folder1/g
1394 ... +++ b/folder1/g
1415 ... +++ b/folder1/g
1395 ... @@ -1,7 +1,7 @@
1416 ... @@ -1,7 +1,7 @@
1396 ... +firstline
1417 ... +firstline
1397 ... c
1418 ... c
1398 ... 1
1419 ... 1
1399 ... 2
1420 ... 2
1400 ... + 3
1421 ... + 3
1401 ... -4
1422 ... -4
1402 ... 5
1423 ... 5
1403 ... d
1424 ... d
1404 ... +lastline"""
1425 ... +lastline"""
1405 >>> hunks = parsepatch(rawpatch)
1426 >>> hunks = parsepatch(rawpatch)
1406 >>> hunkscomingfromfilterpatch = []
1427 >>> hunkscomingfromfilterpatch = []
1407 >>> for h in hunks:
1428 >>> for h in hunks:
1408 ... hunkscomingfromfilterpatch.append(h)
1429 ... hunkscomingfromfilterpatch.append(h)
1409 ... hunkscomingfromfilterpatch.extend(h.hunks)
1430 ... hunkscomingfromfilterpatch.extend(h.hunks)
1410
1431
1411 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1432 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1412 >>> fp = cStringIO.StringIO()
1433 >>> fp = cStringIO.StringIO()
1413 >>> for c in reversedhunks:
1434 >>> for c in reversedhunks:
1414 ... c.write(fp)
1435 ... c.write(fp)
1415 >>> fp.seek(0)
1436 >>> fp.seek(0)
1416 >>> reversedpatch = fp.read()
1437 >>> reversedpatch = fp.read()
1417 >>> print reversedpatch
1438 >>> print reversedpatch
1418 diff --git a/folder1/g b/folder1/g
1439 diff --git a/folder1/g b/folder1/g
1419 --- a/folder1/g
1440 --- a/folder1/g
1420 +++ b/folder1/g
1441 +++ b/folder1/g
1421 @@ -1,4 +1,3 @@
1442 @@ -1,4 +1,3 @@
1422 -firstline
1443 -firstline
1423 c
1444 c
1424 1
1445 1
1425 2
1446 2
1426 @@ -1,6 +2,6 @@
1447 @@ -1,6 +2,6 @@
1427 c
1448 c
1428 1
1449 1
1429 2
1450 2
1430 - 3
1451 - 3
1431 +4
1452 +4
1432 5
1453 5
1433 d
1454 d
1434 @@ -5,3 +6,2 @@
1455 @@ -5,3 +6,2 @@
1435 5
1456 5
1436 d
1457 d
1437 -lastline
1458 -lastline
1438
1459
1439 '''
1460 '''
1440
1461
1441 import crecord as crecordmod
1462 import crecord as crecordmod
1442 newhunks = []
1463 newhunks = []
1443 for c in hunks:
1464 for c in hunks:
1444 if isinstance(c, crecordmod.uihunk):
1465 if isinstance(c, crecordmod.uihunk):
1445 # curses hunks encapsulate the record hunk in _hunk
1466 # curses hunks encapsulate the record hunk in _hunk
1446 c = c._hunk
1467 c = c._hunk
1447 if isinstance(c, recordhunk):
1468 if isinstance(c, recordhunk):
1448 for j, line in enumerate(c.hunk):
1469 for j, line in enumerate(c.hunk):
1449 if line.startswith("-"):
1470 if line.startswith("-"):
1450 c.hunk[j] = "+" + c.hunk[j][1:]
1471 c.hunk[j] = "+" + c.hunk[j][1:]
1451 elif line.startswith("+"):
1472 elif line.startswith("+"):
1452 c.hunk[j] = "-" + c.hunk[j][1:]
1473 c.hunk[j] = "-" + c.hunk[j][1:]
1453 c.added, c.removed = c.removed, c.added
1474 c.added, c.removed = c.removed, c.added
1454 newhunks.append(c)
1475 newhunks.append(c)
1455 return newhunks
1476 return newhunks
1456
1477
1457 def parsepatch(originalchunks):
1478 def parsepatch(originalchunks):
1458 """patch -> [] of headers -> [] of hunks """
1479 """patch -> [] of headers -> [] of hunks """
1459 class parser(object):
1480 class parser(object):
1460 """patch parsing state machine"""
1481 """patch parsing state machine"""
1461 def __init__(self):
1482 def __init__(self):
1462 self.fromline = 0
1483 self.fromline = 0
1463 self.toline = 0
1484 self.toline = 0
1464 self.proc = ''
1485 self.proc = ''
1465 self.header = None
1486 self.header = None
1466 self.context = []
1487 self.context = []
1467 self.before = []
1488 self.before = []
1468 self.hunk = []
1489 self.hunk = []
1469 self.headers = []
1490 self.headers = []
1470
1491
1471 def addrange(self, limits):
1492 def addrange(self, limits):
1472 fromstart, fromend, tostart, toend, proc = limits
1493 fromstart, fromend, tostart, toend, proc = limits
1473 self.fromline = int(fromstart)
1494 self.fromline = int(fromstart)
1474 self.toline = int(tostart)
1495 self.toline = int(tostart)
1475 self.proc = proc
1496 self.proc = proc
1476
1497
1477 def addcontext(self, context):
1498 def addcontext(self, context):
1478 if self.hunk:
1499 if self.hunk:
1479 h = recordhunk(self.header, self.fromline, self.toline,
1500 h = recordhunk(self.header, self.fromline, self.toline,
1480 self.proc, self.before, self.hunk, context)
1501 self.proc, self.before, self.hunk, context)
1481 self.header.hunks.append(h)
1502 self.header.hunks.append(h)
1482 self.fromline += len(self.before) + h.removed
1503 self.fromline += len(self.before) + h.removed
1483 self.toline += len(self.before) + h.added
1504 self.toline += len(self.before) + h.added
1484 self.before = []
1505 self.before = []
1485 self.hunk = []
1506 self.hunk = []
1486 self.proc = ''
1507 self.proc = ''
1487 self.context = context
1508 self.context = context
1488
1509
1489 def addhunk(self, hunk):
1510 def addhunk(self, hunk):
1490 if self.context:
1511 if self.context:
1491 self.before = self.context
1512 self.before = self.context
1492 self.context = []
1513 self.context = []
1493 self.hunk = hunk
1514 self.hunk = hunk
1494
1515
1495 def newfile(self, hdr):
1516 def newfile(self, hdr):
1496 self.addcontext([])
1517 self.addcontext([])
1497 h = header(hdr)
1518 h = header(hdr)
1498 self.headers.append(h)
1519 self.headers.append(h)
1499 self.header = h
1520 self.header = h
1500
1521
1501 def addother(self, line):
1522 def addother(self, line):
1502 pass # 'other' lines are ignored
1523 pass # 'other' lines are ignored
1503
1524
1504 def finished(self):
1525 def finished(self):
1505 self.addcontext([])
1526 self.addcontext([])
1506 return self.headers
1527 return self.headers
1507
1528
1508 transitions = {
1529 transitions = {
1509 'file': {'context': addcontext,
1530 'file': {'context': addcontext,
1510 'file': newfile,
1531 'file': newfile,
1511 'hunk': addhunk,
1532 'hunk': addhunk,
1512 'range': addrange},
1533 'range': addrange},
1513 'context': {'file': newfile,
1534 'context': {'file': newfile,
1514 'hunk': addhunk,
1535 'hunk': addhunk,
1515 'range': addrange,
1536 'range': addrange,
1516 'other': addother},
1537 'other': addother},
1517 'hunk': {'context': addcontext,
1538 'hunk': {'context': addcontext,
1518 'file': newfile,
1539 'file': newfile,
1519 'range': addrange},
1540 'range': addrange},
1520 'range': {'context': addcontext,
1541 'range': {'context': addcontext,
1521 'hunk': addhunk},
1542 'hunk': addhunk},
1522 'other': {'other': addother},
1543 'other': {'other': addother},
1523 }
1544 }
1524
1545
1525 p = parser()
1546 p = parser()
1526 fp = cStringIO.StringIO()
1547 fp = cStringIO.StringIO()
1527 fp.write(''.join(originalchunks))
1548 fp.write(''.join(originalchunks))
1528 fp.seek(0)
1549 fp.seek(0)
1529
1550
1530 state = 'context'
1551 state = 'context'
1531 for newstate, data in scanpatch(fp):
1552 for newstate, data in scanpatch(fp):
1532 try:
1553 try:
1533 p.transitions[state][newstate](p, data)
1554 p.transitions[state][newstate](p, data)
1534 except KeyError:
1555 except KeyError:
1535 raise PatchError('unhandled transition: %s -> %s' %
1556 raise PatchError('unhandled transition: %s -> %s' %
1536 (state, newstate))
1557 (state, newstate))
1537 state = newstate
1558 state = newstate
1538 del fp
1559 del fp
1539 return p.finished()
1560 return p.finished()
1540
1561
1541 def pathtransform(path, strip, prefix):
1562 def pathtransform(path, strip, prefix):
1542 '''turn a path from a patch into a path suitable for the repository
1563 '''turn a path from a patch into a path suitable for the repository
1543
1564
1544 prefix, if not empty, is expected to be normalized with a / at the end.
1565 prefix, if not empty, is expected to be normalized with a / at the end.
1545
1566
1546 Returns (stripped components, path in repository).
1567 Returns (stripped components, path in repository).
1547
1568
1548 >>> pathtransform('a/b/c', 0, '')
1569 >>> pathtransform('a/b/c', 0, '')
1549 ('', 'a/b/c')
1570 ('', 'a/b/c')
1550 >>> pathtransform(' a/b/c ', 0, '')
1571 >>> pathtransform(' a/b/c ', 0, '')
1551 ('', ' a/b/c')
1572 ('', ' a/b/c')
1552 >>> pathtransform(' a/b/c ', 2, '')
1573 >>> pathtransform(' a/b/c ', 2, '')
1553 ('a/b/', 'c')
1574 ('a/b/', 'c')
1554 >>> pathtransform('a/b/c', 0, 'd/e/')
1575 >>> pathtransform('a/b/c', 0, 'd/e/')
1555 ('', 'd/e/a/b/c')
1576 ('', 'd/e/a/b/c')
1556 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1577 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1557 ('a//b/', 'd/e/c')
1578 ('a//b/', 'd/e/c')
1558 >>> pathtransform('a/b/c', 3, '')
1579 >>> pathtransform('a/b/c', 3, '')
1559 Traceback (most recent call last):
1580 Traceback (most recent call last):
1560 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1581 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1561 '''
1582 '''
1562 pathlen = len(path)
1583 pathlen = len(path)
1563 i = 0
1584 i = 0
1564 if strip == 0:
1585 if strip == 0:
1565 return '', prefix + path.rstrip()
1586 return '', prefix + path.rstrip()
1566 count = strip
1587 count = strip
1567 while count > 0:
1588 while count > 0:
1568 i = path.find('/', i)
1589 i = path.find('/', i)
1569 if i == -1:
1590 if i == -1:
1570 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1591 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1571 (count, strip, path))
1592 (count, strip, path))
1572 i += 1
1593 i += 1
1573 # consume '//' in the path
1594 # consume '//' in the path
1574 while i < pathlen - 1 and path[i] == '/':
1595 while i < pathlen - 1 and path[i] == '/':
1575 i += 1
1596 i += 1
1576 count -= 1
1597 count -= 1
1577 return path[:i].lstrip(), prefix + path[i:].rstrip()
1598 return path[:i].lstrip(), prefix + path[i:].rstrip()
1578
1599
1579 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1600 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1580 nulla = afile_orig == "/dev/null"
1601 nulla = afile_orig == "/dev/null"
1581 nullb = bfile_orig == "/dev/null"
1602 nullb = bfile_orig == "/dev/null"
1582 create = nulla and hunk.starta == 0 and hunk.lena == 0
1603 create = nulla and hunk.starta == 0 and hunk.lena == 0
1583 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1604 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1584 abase, afile = pathtransform(afile_orig, strip, prefix)
1605 abase, afile = pathtransform(afile_orig, strip, prefix)
1585 gooda = not nulla and backend.exists(afile)
1606 gooda = not nulla and backend.exists(afile)
1586 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1607 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1587 if afile == bfile:
1608 if afile == bfile:
1588 goodb = gooda
1609 goodb = gooda
1589 else:
1610 else:
1590 goodb = not nullb and backend.exists(bfile)
1611 goodb = not nullb and backend.exists(bfile)
1591 missing = not goodb and not gooda and not create
1612 missing = not goodb and not gooda and not create
1592
1613
1593 # some diff programs apparently produce patches where the afile is
1614 # some diff programs apparently produce patches where the afile is
1594 # not /dev/null, but afile starts with bfile
1615 # not /dev/null, but afile starts with bfile
1595 abasedir = afile[:afile.rfind('/') + 1]
1616 abasedir = afile[:afile.rfind('/') + 1]
1596 bbasedir = bfile[:bfile.rfind('/') + 1]
1617 bbasedir = bfile[:bfile.rfind('/') + 1]
1597 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1618 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1598 and hunk.starta == 0 and hunk.lena == 0):
1619 and hunk.starta == 0 and hunk.lena == 0):
1599 create = True
1620 create = True
1600 missing = False
1621 missing = False
1601
1622
1602 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1623 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1603 # diff is between a file and its backup. In this case, the original
1624 # diff is between a file and its backup. In this case, the original
1604 # file should be patched (see original mpatch code).
1625 # file should be patched (see original mpatch code).
1605 isbackup = (abase == bbase and bfile.startswith(afile))
1626 isbackup = (abase == bbase and bfile.startswith(afile))
1606 fname = None
1627 fname = None
1607 if not missing:
1628 if not missing:
1608 if gooda and goodb:
1629 if gooda and goodb:
1609 if isbackup:
1630 if isbackup:
1610 fname = afile
1631 fname = afile
1611 else:
1632 else:
1612 fname = bfile
1633 fname = bfile
1613 elif gooda:
1634 elif gooda:
1614 fname = afile
1635 fname = afile
1615
1636
1616 if not fname:
1637 if not fname:
1617 if not nullb:
1638 if not nullb:
1618 if isbackup:
1639 if isbackup:
1619 fname = afile
1640 fname = afile
1620 else:
1641 else:
1621 fname = bfile
1642 fname = bfile
1622 elif not nulla:
1643 elif not nulla:
1623 fname = afile
1644 fname = afile
1624 else:
1645 else:
1625 raise PatchError(_("undefined source and destination files"))
1646 raise PatchError(_("undefined source and destination files"))
1626
1647
1627 gp = patchmeta(fname)
1648 gp = patchmeta(fname)
1628 if create:
1649 if create:
1629 gp.op = 'ADD'
1650 gp.op = 'ADD'
1630 elif remove:
1651 elif remove:
1631 gp.op = 'DELETE'
1652 gp.op = 'DELETE'
1632 return gp
1653 return gp
1633
1654
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        while True:
            line = lr.readline()
            if not line:
                break
            if p(line):
                lines.append(line)
            else:
                # not ours: put it back for the outer loop to classify
                lr.push(line)
                break
        return lines

    while True:
        line = lr.readline()
        if not line:
            break
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0] == ' ':
            # context lines (and '\ No newline at end of file' markers)
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line

def scangitpatch(lr, firstline):
    """
    Git patches can emit:
    - rename a to b
    - change b
    - copy a to c
    - change c

    We cannot apply this sequence as-is, the renamed 'a' could not be
    found for it would have been renamed already. And we cannot copy
    from 'b' instead because 'b' would have been changed already. So
    we scan the git patch for copy and rename commands so we can
    perform the copies ahead of time.
    """
    pos = 0
    try:
        pos = lr.fp.tell()
        fp = lr.fp
    except IOError:
        # unseekable stream (e.g. a pipe): buffer the whole input so we
        # can rewind after the metadata scan
        fp = cStringIO.StringIO(lr.fp.read())
    gitlr = linereader(fp)
    gitlr.push(firstline)
    gitpatches = readgitpatch(gitlr)
    # rewind so the caller re-reads the patch body from the start
    fp.seek(pos)
    return gitpatches

def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    gitpatches = None

    # our states
    BFILE = 1
    # context: None = unknown diff flavor, True = context diff,
    # False = unified diff
    context = None
    lr = linereader(fp)

    while True:
        x = lr.readline()
        if not x:
            break
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # emit metadata-only entries for files we skipped past
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # flush remaining git metadata (e.g. mode-change-only entries)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())

def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    binchunk is the delta payload (two varint size headers followed by
    copy/literal opcodes); data is the source the copy opcodes read from.
    Returns the reconstructed target content.
    """
    def deltahead(binchunk):
        """return the number of bytes of one varint header (the last
        byte of a varint has its high bit clear)"""
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    out = ""
    # skip the source-size and target-size varint headers
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy opcode: low bits select which offset/size bytes follow
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # encoded size 0 means 64kB, per the git delta format
                size = 0x10000
            offset_end = offset + size
            out += data[offset:offset_end]
        elif cmd != 0:
            # literal opcode: cmd is the byte count to copy verbatim
            offset_end = i + cmd
            out += binchunk[i:offset_end]
            i += cmd
        else:
            raise PatchError(_('unexpected delta opcode 0'))
    return out

def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Reads a patch from fp and tries to apply it.

    Returns 0 for a clean patch, -1 if any rejects were found and 1 if
    there was any fuzz.

    If 'eolmode' is 'strict', the patch content and patched file are
    read in binary mode. Otherwise, line endings are ignored when
    patching then normalized according to 'eolmode'.
    """
    # thin wrapper fixing the patcher class to 'patchfile'
    return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                      prefix=prefix, eolmode=eolmode)

def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Drive the patch application loop over iterhunks(fp) events.

    patcher is the per-file patching class (e.g. patchfile); backend
    performs the filesystem/repo writes; store holds file contents for
    copy/rename sources. Returns -1 if any rejects were found, else
    1 on fuzz, 0 on a clean apply.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip path components and prepend prefix
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
            current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only entry: handle delete/copy/rename/mode here
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    # FIXME: failing getfile has never been handled here
                    assert data is not None
                if gp.mode:
                    mode = gp.mode
                    if gp.op == 'ADD':
                        # Added files without content have no hunk and
                        # must be created
                        data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before they are
            # modified by later hunks
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise util.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err

def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor."""

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    try:
        for line in fp:
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                # NOTE(review): 'pf'/'printed_file' are only bound after a
                # 'patching file ' line; this assumes the external patch
                # program always prints that first -- TODO confirm
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz

def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply patchobj (a file name or file-like object) via backend.

    files, if given, collects the set of touched file names. Raises
    PatchError if the patch failed to apply; returns True when the
    patch applied with fuzz, False on a clean apply.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol', 'strict')
    if eolmode.lower() not in eolmodes:
        raise util.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        # patchobj may be a path or an already-open file object
        fp = open(patchobj, 'rb')
    except TypeError:
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close the file if we opened it ourselves
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0

def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """use builtin patch to apply <patchobj> to the working directory.
    returns whether patch was applied with fuzz factor."""
    backend = workingbackend(ui, repo, similarity)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of changectx ctx, writing results into
    store instead of the working directory. Returns whether the patch
    applied with fuzz factor."""
    backend = repobackend(ui, repo, ctx, store)
    return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)

def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    patcher = ui.config('ui', 'patch')
    if files is None:
        files = set()
    if patcher:
        # user configured an external patch program: delegate to it
        return _externalpatch(ui, repo, patcher, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)

def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repo-relative file names touched by the patch
    at patchpath (rename sources included), without applying it."""
    backend = fsbackend(ui, repo.root)
    fp = open(patchpath, 'rb')
    try:
        changed = set()
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, first_hunk, gp = values
                if gp:
                    gp.path = pathtransform(gp.path, strip - 1, '')[1]
                    if gp.oldpath:
                        gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
                else:
                    gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                       '')
                changed.add(gp.path)
                if gp.op == 'RENAME':
                    changed.add(gp.oldpath)
            elif state not in ('hunk', 'git'):
                raise util.Abort(_('unsupported parser state: %s') % state)
        return changed
    finally:
        fp.close()

class GitDiffRequired(Exception):
    """Raised when a change can only be represented in git diff format."""
    pass

def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''return diffopts with all features supported and parsed'''
    # convenience wrapper enabling every difffeatureopts feature group
    return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
                           git=True, whitespace=True, formatchanging=True)

# backwards-compatible alias for the historical name
diffopts = diffallopts

def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        # command-line opts win over config; HGPLAIN can force a value
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

2146 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2167 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2147 losedatafn=None, prefix='', relroot=''):
2168 losedatafn=None, prefix='', relroot=''):
2148 '''yields diff of changes to files between two nodes, or node and
2169 '''yields diff of changes to files between two nodes, or node and
2149 working directory.
2170 working directory.
2150
2171
2151 if node1 is None, use first dirstate parent instead.
2172 if node1 is None, use first dirstate parent instead.
2152 if node2 is None, compare node1 with working directory.
2173 if node2 is None, compare node1 with working directory.
2153
2174
2154 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2175 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2155 every time some change cannot be represented with the current
2176 every time some change cannot be represented with the current
2156 patch format. Return False to upgrade to git patch format, True to
2177 patch format. Return False to upgrade to git patch format, True to
2157 accept the loss or raise an exception to abort the diff. It is
2178 accept the loss or raise an exception to abort the diff. It is
2158 called with the name of current file being diffed as 'fn'. If set
2179 called with the name of current file being diffed as 'fn'. If set
2159 to None, patches will always be upgraded to git format when
2180 to None, patches will always be upgraded to git format when
2160 necessary.
2181 necessary.
2161
2182
2162 prefix is a filename prefix that is prepended to all filenames on
2183 prefix is a filename prefix that is prepended to all filenames on
2163 display (used for subrepos).
2184 display (used for subrepos).
2164
2185
2165 relroot, if not empty, must be normalized with a trailing /. Any match
2186 relroot, if not empty, must be normalized with a trailing /. Any match
2166 patterns that fall outside it will be ignored.'''
2187 patterns that fall outside it will be ignored.'''
2167
2188
2168 if opts is None:
2189 if opts is None:
2169 opts = mdiff.defaultopts
2190 opts = mdiff.defaultopts
2170
2191
2171 if not node1 and not node2:
2192 if not node1 and not node2:
2172 node1 = repo.dirstate.p1()
2193 node1 = repo.dirstate.p1()
2173
2194
2174 def lrugetfilectx():
2195 def lrugetfilectx():
2175 cache = {}
2196 cache = {}
2176 order = collections.deque()
2197 order = collections.deque()
2177 def getfilectx(f, ctx):
2198 def getfilectx(f, ctx):
2178 fctx = ctx.filectx(f, filelog=cache.get(f))
2199 fctx = ctx.filectx(f, filelog=cache.get(f))
2179 if f not in cache:
2200 if f not in cache:
2180 if len(cache) > 20:
2201 if len(cache) > 20:
2181 del cache[order.popleft()]
2202 del cache[order.popleft()]
2182 cache[f] = fctx.filelog()
2203 cache[f] = fctx.filelog()
2183 else:
2204 else:
2184 order.remove(f)
2205 order.remove(f)
2185 order.append(f)
2206 order.append(f)
2186 return fctx
2207 return fctx
2187 return getfilectx
2208 return getfilectx
2188 getfilectx = lrugetfilectx()
2209 getfilectx = lrugetfilectx()
2189
2210
2190 ctx1 = repo[node1]
2211 ctx1 = repo[node1]
2191 ctx2 = repo[node2]
2212 ctx2 = repo[node2]
2192
2213
2193 relfiltered = False
2214 relfiltered = False
2194 if relroot != '' and match.always():
2215 if relroot != '' and match.always():
2195 # as a special case, create a new matcher with just the relroot
2216 # as a special case, create a new matcher with just the relroot
2196 pats = [relroot]
2217 pats = [relroot]
2197 match = scmutil.match(ctx2, pats, default='path')
2218 match = scmutil.match(ctx2, pats, default='path')
2198 relfiltered = True
2219 relfiltered = True
2199
2220
2200 if not changes:
2221 if not changes:
2201 changes = repo.status(ctx1, ctx2, match=match)
2222 changes = repo.status(ctx1, ctx2, match=match)
2202 modified, added, removed = changes[:3]
2223 modified, added, removed = changes[:3]
2203
2224
2204 if not modified and not added and not removed:
2225 if not modified and not added and not removed:
2205 return []
2226 return []
2206
2227
2207 if repo.ui.debugflag:
2228 if repo.ui.debugflag:
2208 hexfunc = hex
2229 hexfunc = hex
2209 else:
2230 else:
2210 hexfunc = short
2231 hexfunc = short
2211 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2232 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2212
2233
2213 copy = {}
2234 copy = {}
2214 if opts.git or opts.upgrade:
2235 if opts.git or opts.upgrade:
2215 copy = copies.pathcopies(ctx1, ctx2, match=match)
2236 copy = copies.pathcopies(ctx1, ctx2, match=match)
2216
2237
2217 if relroot is not None:
2238 if relroot is not None:
2218 if not relfiltered:
2239 if not relfiltered:
2219 # XXX this would ideally be done in the matcher, but that is
2240 # XXX this would ideally be done in the matcher, but that is
2220 # generally meant to 'or' patterns, not 'and' them. In this case we
2241 # generally meant to 'or' patterns, not 'and' them. In this case we
2221 # need to 'and' all the patterns from the matcher with relroot.
2242 # need to 'and' all the patterns from the matcher with relroot.
2222 def filterrel(l):
2243 def filterrel(l):
2223 return [f for f in l if f.startswith(relroot)]
2244 return [f for f in l if f.startswith(relroot)]
2224 modified = filterrel(modified)
2245 modified = filterrel(modified)
2225 added = filterrel(added)
2246 added = filterrel(added)
2226 removed = filterrel(removed)
2247 removed = filterrel(removed)
2227 relfiltered = True
2248 relfiltered = True
2228 # filter out copies where either side isn't inside the relative root
2249 # filter out copies where either side isn't inside the relative root
2229 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2250 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2230 if dst.startswith(relroot)
2251 if dst.startswith(relroot)
2231 and src.startswith(relroot)))
2252 and src.startswith(relroot)))
2232
2253
2233 def difffn(opts, losedata):
2254 def difffn(opts, losedata):
2234 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2255 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2235 copy, getfilectx, opts, losedata, prefix, relroot)
2256 copy, getfilectx, opts, losedata, prefix, relroot)
2236 if opts.upgrade and not opts.git:
2257 if opts.upgrade and not opts.git:
2237 try:
2258 try:
2238 def losedata(fn):
2259 def losedata(fn):
2239 if not losedatafn or not losedatafn(fn=fn):
2260 if not losedatafn or not losedatafn(fn=fn):
2240 raise GitDiffRequired
2261 raise GitDiffRequired
2241 # Buffer the whole output until we are sure it can be generated
2262 # Buffer the whole output until we are sure it can be generated
2242 return list(difffn(opts.copy(git=False), losedata))
2263 return list(difffn(opts.copy(git=False), losedata))
2243 except GitDiffRequired:
2264 except GitDiffRequired:
2244 return difffn(opts.copy(git=True), None)
2265 return difffn(opts.copy(git=True), None)
2245 else:
2266 else:
2246 return difffn(opts, None)
2267 return difffn(opts, None)
2247
2268
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # prefix -> label tables: one for lines inside a file header, one for
    # hunk/body lines
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    inheader = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for idx, line in enumerate(lines):
            if idx != 0:
                yield ('\n', '')
            # a header starts at any line that is not hunk content and runs
            # until the first '@@' hunk marker
            if inheader:
                if line.startswith('@'):
                    inheader = False
            elif line and line[0] not in ' +-@\\':
                inheader = True
            stripline = line
            changedline = False
            if not inheader and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                changedline = True

            prefixes = headprefixes if inheader else textprefixes
            for prefix, label in prefixes:
                if not stripline.startswith(prefix):
                    continue
                if changedline:
                    # split out tab runs so they can be labelled separately
                    for token in tabsplitter.findall(stripline):
                        if token[0] == '\t':
                            yield (token, 'diff.tab')
                        else:
                            yield (token, label)
                else:
                    yield (stripline, label)
                break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2299
2320
def diffui(*args, **kwargs):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    # delegate to difflabel so every chunk of diff() output comes back
    # pre-labelled for colorized display
    return difflabel(diff, *args, **kwargs)
2303
2324
2304 def _filepairs(ctx1, modified, added, removed, copy, opts):
2325 def _filepairs(ctx1, modified, added, removed, copy, opts):
2305 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2326 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2306 before and f2 is the the name after. For added files, f1 will be None,
2327 before and f2 is the the name after. For added files, f1 will be None,
2307 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2328 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2308 or 'rename' (the latter two only if opts.git is set).'''
2329 or 'rename' (the latter two only if opts.git is set).'''
2309 gone = set()
2330 gone = set()
2310
2331
2311 copyto = dict([(v, k) for k, v in copy.items()])
2332 copyto = dict([(v, k) for k, v in copy.items()])
2312
2333
2313 addedset, removedset = set(added), set(removed)
2334 addedset, removedset = set(added), set(removed)
2314 # Fix up added, since merged-in additions appear as
2335 # Fix up added, since merged-in additions appear as
2315 # modifications during merges
2336 # modifications during merges
2316 for f in modified:
2337 for f in modified:
2317 if f not in ctx1:
2338 if f not in ctx1:
2318 addedset.add(f)
2339 addedset.add(f)
2319
2340
2320 for f in sorted(modified + added + removed):
2341 for f in sorted(modified + added + removed):
2321 copyop = None
2342 copyop = None
2322 f1, f2 = f, f
2343 f1, f2 = f, f
2323 if f in addedset:
2344 if f in addedset:
2324 f1 = None
2345 f1 = None
2325 if f in copy:
2346 if f in copy:
2326 if opts.git:
2347 if opts.git:
2327 f1 = copy[f]
2348 f1 = copy[f]
2328 if f1 in removedset and f1 not in gone:
2349 if f1 in removedset and f1 not in gone:
2329 copyop = 'rename'
2350 copyop = 'rename'
2330 gone.add(f1)
2351 gone.add(f1)
2331 else:
2352 else:
2332 copyop = 'copy'
2353 copyop = 'copy'
2333 elif f in removedset:
2354 elif f in removedset:
2334 f2 = None
2355 f2 = None
2335 if opts.git:
2356 if opts.git:
2336 # have we already reported a copy above?
2357 # have we already reported a copy above?
2337 if (f in copyto and copyto[f] in addedset
2358 if (f in copyto and copyto[f] in addedset
2338 and copy[copyto[f]] == f):
2359 and copy[copyto[f]] == f):
2339 continue
2360 continue
2340 yield f1, f2, copyop
2361 yield f1, f2, copyop
2341
2362
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob hash exactly as git computes it: sha1 over a 'blob <len>\0'
        # header followed by the content
        if not text:
            text = ""
        s = util.sha1('blob %d\0' % len(text))
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        # plain (non-git) header line: "diff -r REV1 -r REV2 file"
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        # developer sanity check: everything must live under relroot
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = content2 = None
        flag1 = flag2 = None
        # flags/binary-ness are only needed for git diffs or to detect
        # data loss in plain diffs
        wantmeta = opts.git or losedatafn
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if wantmeta:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if wantmeta:
                flag2 = ctx2.flags(f2)
        binary = False
        if wantmeta:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        # strip relroot, prepend the display prefix
        path1 = posixpath.join(prefix, (f1 or f2)[len(relroot):])
        path2 = posixpath.join(prefix, (f2 or f1)[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        # suppress a bare one-line header when there is no diff body
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text
2452
2473
def diffstatsum(stats):
    '''reduce (filename, adds, removes, isbinary) tuples to the totals
    needed to lay out a diffstat: widest filename, largest per-file change
    count, total additions, total removals, and whether any file is binary'''
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for filename, adds, removes, isbinary in stats:
        maxfile = max(maxfile, encoding.colwidth(filename))
        maxtotal = max(maxtotal, adds + removes)
        addtotal += adds
        removetotal += removes
        binary = binary or isbinary

    return maxfile, maxtotal, addtotal, removetotal, binary
2463
2484
def diffstatdata(lines):
    '''parse diff output into a list of (filename, adds, removes, isbinary)'''
    # matches plain headers: "diff -r REV [-r REV] filename"
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    fname = None
    adds = removes = 0
    binaryfile = False

    def flush():
        # record stats for the file we just finished scanning, if any
        if fname:
            results.append((fname, adds, removes, binaryfile))

    for line in lines:
        if line.startswith('diff'):
            flush()
            # set numbers to 0 anyway when starting new file
            adds = removes = 0
            binaryfile = False
            if line.startswith('diff --git a/'):
                fname = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                fname = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch')
              or line.startswith('Binary file')):
            binaryfile = True
    flush()
    return results
2493
2514
def diffstat(lines, width=80, git=False):
    '''render diff output as a classic diffstat table, one row per file
    plus a trailing summary line'''
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        # leave room for the 'Bin' marker
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    output = []
    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        padding = ' ' * (maxname - encoding.colwidth(filename))
        output.append(' %s%s | %*s %s%s\n'
                      % (filename, padding, countwidth, count,
                         pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)
2531
2552
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            # graph row: split off the +/- bar and label its two runs
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            plusrun = re.search(r'\++', graph)
            if plusrun:
                yield (plusrun.group(0), 'diffstat.inserted')
            minusrun = re.search(r'-+', graph)
            if minusrun:
                yield (minusrun.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now