commit: add a way to return more information from the chunkselector...
Laurent Charignon
r27155:8d3c5797 default
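
The change below makes the chunk selector return a pair instead of a bare list of hunks: the hunks the user kept, plus a dict of extra options chosen in the selector UI, which the caller folds back into its commit options before committing. A minimal standalone sketch of that calling convention follows (illustrative only, not the Mercurial API; fake_chunkselector and do_commit are hypothetical names):

# Illustrative sketch of the new (chunks, newopts) convention.
def fake_chunkselector(chunks):
    # Hypothetical stand-in for the curses chunk selector: keep every hunk
    # and pretend the user also set a commit message from the UI.
    kept = list(chunks)
    newopts = {'message': 'entered in the chunk selector'}
    return kept, newopts

def do_commit(chunks, opts, filterfn=fake_chunkselector):
    # Mirrors the pattern added to cmdutil.recordfunc: unpack both values,
    # then merge the selector's extra options into the caller's opts.
    kept, newopts = filterfn(chunks)
    opts.update(newopts)
    return kept, opts

kept, opts = do_commit(['hunk1', 'hunk2'], {'user': 'alice'})
print(kept, opts)
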
@@ -1,3408 +1,3409
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, bin, nullid, nullrev, short
8 from node import hex, bin, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import repair, graphmod, revset, phases, obsolete, pathutil
13 import repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import formatter
17 import formatter
18 import crecord as crecordmod
18 import crecord as crecordmod
19 import lock as lockmod
19 import lock as lockmod
20
20
21 def ishunk(x):
21 def ishunk(x):
22 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
22 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
23 return isinstance(x, hunkclasses)
23 return isinstance(x, hunkclasses)
24
24
25 def newandmodified(chunks, originalchunks):
25 def newandmodified(chunks, originalchunks):
26 newlyaddedandmodifiedfiles = set()
26 newlyaddedandmodifiedfiles = set()
27 for chunk in chunks:
27 for chunk in chunks:
28 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
28 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
29 originalchunks:
29 originalchunks:
30 newlyaddedandmodifiedfiles.add(chunk.header.filename())
30 newlyaddedandmodifiedfiles.add(chunk.header.filename())
31 return newlyaddedandmodifiedfiles
31 return newlyaddedandmodifiedfiles
32
32
33 def parsealiases(cmd):
33 def parsealiases(cmd):
34 return cmd.lstrip("^").split("|")
34 return cmd.lstrip("^").split("|")
35
35
36 def setupwrapcolorwrite(ui):
36 def setupwrapcolorwrite(ui):
37 # wrap ui.write so diff output can be labeled/colorized
37 # wrap ui.write so diff output can be labeled/colorized
38 def wrapwrite(orig, *args, **kw):
38 def wrapwrite(orig, *args, **kw):
39 label = kw.pop('label', '')
39 label = kw.pop('label', '')
40 for chunk, l in patch.difflabel(lambda: args):
40 for chunk, l in patch.difflabel(lambda: args):
41 orig(chunk, label=label + l)
41 orig(chunk, label=label + l)
42
42
43 oldwrite = ui.write
43 oldwrite = ui.write
44 def wrap(*args, **kwargs):
44 def wrap(*args, **kwargs):
45 return wrapwrite(oldwrite, *args, **kwargs)
45 return wrapwrite(oldwrite, *args, **kwargs)
46 setattr(ui, 'write', wrap)
46 setattr(ui, 'write', wrap)
47 return oldwrite
47 return oldwrite
48
48
49 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
49 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
50 if usecurses:
50 if usecurses:
51 if testfile:
51 if testfile:
52 recordfn = crecordmod.testdecorator(testfile,
52 recordfn = crecordmod.testdecorator(testfile,
53 crecordmod.testchunkselector)
53 crecordmod.testchunkselector)
54 else:
54 else:
55 recordfn = crecordmod.chunkselector
55 recordfn = crecordmod.chunkselector
56
56
57 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
57 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
58
58
59 else:
59 else:
60 return patch.filterpatch(ui, originalhunks, operation)
60 return patch.filterpatch(ui, originalhunks, operation)
61
61
62 def recordfilter(ui, originalhunks, operation=None):
62 def recordfilter(ui, originalhunks, operation=None):
63 """ Prompts the user to filter the originalhunks and return a list of
63 """ Prompts the user to filter the originalhunks and return a list of
64 selected hunks.
64 selected hunks.
65 *operation* is used for ui purposes to indicate the user
65 *operation* is used for ui purposes to indicate the user
66 what kind of filtering they are doing: reverting, committing, shelving, etc.
66 what kind of filtering they are doing: reverting, committing, shelving, etc.
67 *operation* has to be a translated string.
67 *operation* has to be a translated string.
68 """
68 """
69 usecurses = ui.configbool('experimental', 'crecord', False)
69 usecurses = ui.configbool('experimental', 'crecord', False)
70 testfile = ui.config('experimental', 'crecordtest', None)
70 testfile = ui.config('experimental', 'crecordtest', None)
71 oldwrite = setupwrapcolorwrite(ui)
71 oldwrite = setupwrapcolorwrite(ui)
72 try:
72 try:
73 newchunks = filterchunks(ui, originalhunks, usecurses, testfile,
73 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
74 operation)
74 testfile, operation)
75 finally:
75 finally:
76 ui.write = oldwrite
76 ui.write = oldwrite
77 return newchunks
77 return newchunks, newopts
78
78
79 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
79 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
80 filterfn, *pats, **opts):
80 filterfn, *pats, **opts):
81 import merge as mergemod
81 import merge as mergemod
82
82
83 if not ui.interactive():
83 if not ui.interactive():
84 if cmdsuggest:
84 if cmdsuggest:
85 msg = _('running non-interactively, use %s instead') % cmdsuggest
85 msg = _('running non-interactively, use %s instead') % cmdsuggest
86 else:
86 else:
87 msg = _('running non-interactively')
87 msg = _('running non-interactively')
88 raise error.Abort(msg)
88 raise error.Abort(msg)
89
89
90 # make sure username is set before going interactive
90 # make sure username is set before going interactive
91 if not opts.get('user'):
91 if not opts.get('user'):
92 ui.username() # raise exception, username not provided
92 ui.username() # raise exception, username not provided
93
93
94 def recordfunc(ui, repo, message, match, opts):
94 def recordfunc(ui, repo, message, match, opts):
95 """This is generic record driver.
95 """This is generic record driver.
96
96
97 Its job is to interactively filter local changes, and
97 Its job is to interactively filter local changes, and
98 accordingly prepare working directory into a state in which the
98 accordingly prepare working directory into a state in which the
99 job can be delegated to a non-interactive commit command such as
99 job can be delegated to a non-interactive commit command such as
100 'commit' or 'qrefresh'.
100 'commit' or 'qrefresh'.
101
101
102 After the actual job is done by non-interactive command, the
102 After the actual job is done by non-interactive command, the
103 working directory is restored to its original state.
103 working directory is restored to its original state.
104
104
105 In the end we'll record interesting changes, and everything else
105 In the end we'll record interesting changes, and everything else
106 will be left in place, so the user can continue working.
106 will be left in place, so the user can continue working.
107 """
107 """
108
108
109 checkunfinished(repo, commit=True)
109 checkunfinished(repo, commit=True)
110 merge = len(repo[None].parents()) > 1
110 merge = len(repo[None].parents()) > 1
111 if merge:
111 if merge:
112 raise error.Abort(_('cannot partially commit a merge '
112 raise error.Abort(_('cannot partially commit a merge '
113 '(use "hg commit" instead)'))
113 '(use "hg commit" instead)'))
114
114
115 status = repo.status(match=match)
115 status = repo.status(match=match)
116 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
116 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
117 diffopts.nodates = True
117 diffopts.nodates = True
118 diffopts.git = True
118 diffopts.git = True
119 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
119 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
120 originalchunks = patch.parsepatch(originaldiff)
120 originalchunks = patch.parsepatch(originaldiff)
121
121
122 # 1. filter patch, so we have intending-to apply subset of it
122 # 1. filter patch, so we have intending-to apply subset of it
123 try:
123 try:
124 chunks = filterfn(ui, originalchunks)
124 chunks, newopts = filterfn(ui, originalchunks)
125 except patch.PatchError as err:
125 except patch.PatchError as err:
126 raise error.Abort(_('error parsing patch: %s') % err)
126 raise error.Abort(_('error parsing patch: %s') % err)
127 opts.update(newopts)
127
128
128 # We need to keep a backup of files that have been newly added and
129 # We need to keep a backup of files that have been newly added and
129 # modified during the recording process because there is a previous
130 # modified during the recording process because there is a previous
130 # version without the edit in the workdir
131 # version without the edit in the workdir
131 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
132 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
132 contenders = set()
133 contenders = set()
133 for h in chunks:
134 for h in chunks:
134 try:
135 try:
135 contenders.update(set(h.files()))
136 contenders.update(set(h.files()))
136 except AttributeError:
137 except AttributeError:
137 pass
138 pass
138
139
139 changed = status.modified + status.added + status.removed
140 changed = status.modified + status.added + status.removed
140 newfiles = [f for f in changed if f in contenders]
141 newfiles = [f for f in changed if f in contenders]
141 if not newfiles:
142 if not newfiles:
142 ui.status(_('no changes to record\n'))
143 ui.status(_('no changes to record\n'))
143 return 0
144 return 0
144
145
145 modified = set(status.modified)
146 modified = set(status.modified)
146
147
147 # 2. backup changed files, so we can restore them in the end
148 # 2. backup changed files, so we can restore them in the end
148
149
149 if backupall:
150 if backupall:
150 tobackup = changed
151 tobackup = changed
151 else:
152 else:
152 tobackup = [f for f in newfiles if f in modified or f in \
153 tobackup = [f for f in newfiles if f in modified or f in \
153 newlyaddedandmodifiedfiles]
154 newlyaddedandmodifiedfiles]
154 backups = {}
155 backups = {}
155 if tobackup:
156 if tobackup:
156 backupdir = repo.join('record-backups')
157 backupdir = repo.join('record-backups')
157 try:
158 try:
158 os.mkdir(backupdir)
159 os.mkdir(backupdir)
159 except OSError as err:
160 except OSError as err:
160 if err.errno != errno.EEXIST:
161 if err.errno != errno.EEXIST:
161 raise
162 raise
162 try:
163 try:
163 # backup continues
164 # backup continues
164 for f in tobackup:
165 for f in tobackup:
165 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
166 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
166 dir=backupdir)
167 dir=backupdir)
167 os.close(fd)
168 os.close(fd)
168 ui.debug('backup %r as %r\n' % (f, tmpname))
169 ui.debug('backup %r as %r\n' % (f, tmpname))
169 util.copyfile(repo.wjoin(f), tmpname)
170 util.copyfile(repo.wjoin(f), tmpname)
170 shutil.copystat(repo.wjoin(f), tmpname)
171 shutil.copystat(repo.wjoin(f), tmpname)
171 backups[f] = tmpname
172 backups[f] = tmpname
172
173
173 fp = cStringIO.StringIO()
174 fp = cStringIO.StringIO()
174 for c in chunks:
175 for c in chunks:
175 fname = c.filename()
176 fname = c.filename()
176 if fname in backups:
177 if fname in backups:
177 c.write(fp)
178 c.write(fp)
178 dopatch = fp.tell()
179 dopatch = fp.tell()
179 fp.seek(0)
180 fp.seek(0)
180
181
181 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
182 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
182 # 3a. apply filtered patch to clean repo (clean)
183 # 3a. apply filtered patch to clean repo (clean)
183 if backups:
184 if backups:
184 # Equivalent to hg.revert
185 # Equivalent to hg.revert
185 choices = lambda key: key in backups
186 choices = lambda key: key in backups
186 mergemod.update(repo, repo.dirstate.p1(),
187 mergemod.update(repo, repo.dirstate.p1(),
187 False, True, choices)
188 False, True, choices)
188
189
189 # 3b. (apply)
190 # 3b. (apply)
190 if dopatch:
191 if dopatch:
191 try:
192 try:
192 ui.debug('applying patch\n')
193 ui.debug('applying patch\n')
193 ui.debug(fp.getvalue())
194 ui.debug(fp.getvalue())
194 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
195 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
195 except patch.PatchError as err:
196 except patch.PatchError as err:
196 raise error.Abort(str(err))
197 raise error.Abort(str(err))
197 del fp
198 del fp
198
199
199 # 4. We prepared working directory according to filtered
200 # 4. We prepared working directory according to filtered
200 # patch. Now is the time to delegate the job to
201 # patch. Now is the time to delegate the job to
201 # commit/qrefresh or the like!
202 # commit/qrefresh or the like!
202
203
203 # Make all of the pathnames absolute.
204 # Make all of the pathnames absolute.
204 newfiles = [repo.wjoin(nf) for nf in newfiles]
205 newfiles = [repo.wjoin(nf) for nf in newfiles]
205 return commitfunc(ui, repo, *newfiles, **opts)
206 return commitfunc(ui, repo, *newfiles, **opts)
206 finally:
207 finally:
207 # 5. finally restore backed-up files
208 # 5. finally restore backed-up files
208 try:
209 try:
209 dirstate = repo.dirstate
210 dirstate = repo.dirstate
210 for realname, tmpname in backups.iteritems():
211 for realname, tmpname in backups.iteritems():
211 ui.debug('restoring %r to %r\n' % (tmpname, realname))
212 ui.debug('restoring %r to %r\n' % (tmpname, realname))
212
213
213 if dirstate[realname] == 'n':
214 if dirstate[realname] == 'n':
214 # without normallookup, restoring timestamp
215 # without normallookup, restoring timestamp
215 # may cause partially committed files
216 # may cause partially committed files
216 # to be treated as unmodified
217 # to be treated as unmodified
217 dirstate.normallookup(realname)
218 dirstate.normallookup(realname)
218
219
219 util.copyfile(tmpname, repo.wjoin(realname))
220 util.copyfile(tmpname, repo.wjoin(realname))
220 # Our calls to copystat() here and above are a
221 # Our calls to copystat() here and above are a
221 # hack to trick any editors that have f open that
222 # hack to trick any editors that have f open that
222 # we haven't modified them.
223 # we haven't modified them.
223 #
224 #
224 # Also note that this racy as an editor could
225 # Also note that this racy as an editor could
225 # notice the file's mtime before we've finished
226 # notice the file's mtime before we've finished
226 # writing it.
227 # writing it.
227 shutil.copystat(tmpname, repo.wjoin(realname))
228 shutil.copystat(tmpname, repo.wjoin(realname))
228 os.unlink(tmpname)
229 os.unlink(tmpname)
229 if tobackup:
230 if tobackup:
230 os.rmdir(backupdir)
231 os.rmdir(backupdir)
231 except OSError:
232 except OSError:
232 pass
233 pass
233
234
234 def recordinwlock(ui, repo, message, match, opts):
235 def recordinwlock(ui, repo, message, match, opts):
235 wlock = repo.wlock()
236 wlock = repo.wlock()
236 try:
237 try:
237 return recordfunc(ui, repo, message, match, opts)
238 return recordfunc(ui, repo, message, match, opts)
238 finally:
239 finally:
239 wlock.release()
240 wlock.release()
240
241
241 return commit(ui, repo, recordinwlock, pats, opts)
242 return commit(ui, repo, recordinwlock, pats, opts)
242
243
243 def findpossible(cmd, table, strict=False):
244 def findpossible(cmd, table, strict=False):
244 """
245 """
245 Return cmd -> (aliases, command table entry)
246 Return cmd -> (aliases, command table entry)
246 for each matching command.
247 for each matching command.
247 Return debug commands (or their aliases) only if no normal command matches.
248 Return debug commands (or their aliases) only if no normal command matches.
248 """
249 """
249 choice = {}
250 choice = {}
250 debugchoice = {}
251 debugchoice = {}
251
252
252 if cmd in table:
253 if cmd in table:
253 # short-circuit exact matches, "log" alias beats "^log|history"
254 # short-circuit exact matches, "log" alias beats "^log|history"
254 keys = [cmd]
255 keys = [cmd]
255 else:
256 else:
256 keys = table.keys()
257 keys = table.keys()
257
258
258 allcmds = []
259 allcmds = []
259 for e in keys:
260 for e in keys:
260 aliases = parsealiases(e)
261 aliases = parsealiases(e)
261 allcmds.extend(aliases)
262 allcmds.extend(aliases)
262 found = None
263 found = None
263 if cmd in aliases:
264 if cmd in aliases:
264 found = cmd
265 found = cmd
265 elif not strict:
266 elif not strict:
266 for a in aliases:
267 for a in aliases:
267 if a.startswith(cmd):
268 if a.startswith(cmd):
268 found = a
269 found = a
269 break
270 break
270 if found is not None:
271 if found is not None:
271 if aliases[0].startswith("debug") or found.startswith("debug"):
272 if aliases[0].startswith("debug") or found.startswith("debug"):
272 debugchoice[found] = (aliases, table[e])
273 debugchoice[found] = (aliases, table[e])
273 else:
274 else:
274 choice[found] = (aliases, table[e])
275 choice[found] = (aliases, table[e])
275
276
276 if not choice and debugchoice:
277 if not choice and debugchoice:
277 choice = debugchoice
278 choice = debugchoice
278
279
279 return choice, allcmds
280 return choice, allcmds
280
281
281 def findcmd(cmd, table, strict=True):
282 def findcmd(cmd, table, strict=True):
282 """Return (aliases, command table entry) for command string."""
283 """Return (aliases, command table entry) for command string."""
283 choice, allcmds = findpossible(cmd, table, strict)
284 choice, allcmds = findpossible(cmd, table, strict)
284
285
285 if cmd in choice:
286 if cmd in choice:
286 return choice[cmd]
287 return choice[cmd]
287
288
288 if len(choice) > 1:
289 if len(choice) > 1:
289 clist = choice.keys()
290 clist = choice.keys()
290 clist.sort()
291 clist.sort()
291 raise error.AmbiguousCommand(cmd, clist)
292 raise error.AmbiguousCommand(cmd, clist)
292
293
293 if choice:
294 if choice:
294 return choice.values()[0]
295 return choice.values()[0]
295
296
296 raise error.UnknownCommand(cmd, allcmds)
297 raise error.UnknownCommand(cmd, allcmds)
297
298
298 def findrepo(p):
299 def findrepo(p):
299 while not os.path.isdir(os.path.join(p, ".hg")):
300 while not os.path.isdir(os.path.join(p, ".hg")):
300 oldp, p = p, os.path.dirname(p)
301 oldp, p = p, os.path.dirname(p)
301 if p == oldp:
302 if p == oldp:
302 return None
303 return None
303
304
304 return p
305 return p
305
306
306 def bailifchanged(repo, merge=True):
307 def bailifchanged(repo, merge=True):
307 if merge and repo.dirstate.p2() != nullid:
308 if merge and repo.dirstate.p2() != nullid:
308 raise error.Abort(_('outstanding uncommitted merge'))
309 raise error.Abort(_('outstanding uncommitted merge'))
309 modified, added, removed, deleted = repo.status()[:4]
310 modified, added, removed, deleted = repo.status()[:4]
310 if modified or added or removed or deleted:
311 if modified or added or removed or deleted:
311 raise error.Abort(_('uncommitted changes'))
312 raise error.Abort(_('uncommitted changes'))
312 ctx = repo[None]
313 ctx = repo[None]
313 for s in sorted(ctx.substate):
314 for s in sorted(ctx.substate):
314 ctx.sub(s).bailifchanged()
315 ctx.sub(s).bailifchanged()
315
316
316 def logmessage(ui, opts):
317 def logmessage(ui, opts):
317 """ get the log message according to -m and -l option """
318 """ get the log message according to -m and -l option """
318 message = opts.get('message')
319 message = opts.get('message')
319 logfile = opts.get('logfile')
320 logfile = opts.get('logfile')
320
321
321 if message and logfile:
322 if message and logfile:
322 raise error.Abort(_('options --message and --logfile are mutually '
323 raise error.Abort(_('options --message and --logfile are mutually '
323 'exclusive'))
324 'exclusive'))
324 if not message and logfile:
325 if not message and logfile:
325 try:
326 try:
326 if logfile == '-':
327 if logfile == '-':
327 message = ui.fin.read()
328 message = ui.fin.read()
328 else:
329 else:
329 message = '\n'.join(util.readfile(logfile).splitlines())
330 message = '\n'.join(util.readfile(logfile).splitlines())
330 except IOError as inst:
331 except IOError as inst:
331 raise error.Abort(_("can't read commit message '%s': %s") %
332 raise error.Abort(_("can't read commit message '%s': %s") %
332 (logfile, inst.strerror))
333 (logfile, inst.strerror))
333 return message
334 return message
334
335
335 def mergeeditform(ctxorbool, baseformname):
336 def mergeeditform(ctxorbool, baseformname):
336 """return appropriate editform name (referencing a committemplate)
337 """return appropriate editform name (referencing a committemplate)
337
338
338 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
339 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
339 merging is committed.
340 merging is committed.
340
341
341 This returns baseformname with '.merge' appended if it is a merge,
342 This returns baseformname with '.merge' appended if it is a merge,
342 otherwise '.normal' is appended.
343 otherwise '.normal' is appended.
343 """
344 """
344 if isinstance(ctxorbool, bool):
345 if isinstance(ctxorbool, bool):
345 if ctxorbool:
346 if ctxorbool:
346 return baseformname + ".merge"
347 return baseformname + ".merge"
347 elif 1 < len(ctxorbool.parents()):
348 elif 1 < len(ctxorbool.parents()):
348 return baseformname + ".merge"
349 return baseformname + ".merge"
349
350
350 return baseformname + ".normal"
351 return baseformname + ".normal"
351
352
352 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
353 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
353 editform='', **opts):
354 editform='', **opts):
354 """get appropriate commit message editor according to '--edit' option
355 """get appropriate commit message editor according to '--edit' option
355
356
356 'finishdesc' is a function to be called with edited commit message
357 'finishdesc' is a function to be called with edited commit message
357 (= 'description' of the new changeset) just after editing, but
358 (= 'description' of the new changeset) just after editing, but
358 before checking empty-ness. It should return actual text to be
359 before checking empty-ness. It should return actual text to be
359 stored into history. This allows to change description before
360 stored into history. This allows to change description before
360 storing.
361 storing.
361
362
362 'extramsg' is a extra message to be shown in the editor instead of
363 'extramsg' is a extra message to be shown in the editor instead of
363 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
364 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
364 is automatically added.
365 is automatically added.
365
366
366 'editform' is a dot-separated list of names, to distinguish
367 'editform' is a dot-separated list of names, to distinguish
367 the purpose of commit text editing.
368 the purpose of commit text editing.
368
369
369 'getcommiteditor' returns 'commitforceeditor' regardless of
370 'getcommiteditor' returns 'commitforceeditor' regardless of
370 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
371 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
371 they are specific for usage in MQ.
372 they are specific for usage in MQ.
372 """
373 """
373 if edit or finishdesc or extramsg:
374 if edit or finishdesc or extramsg:
374 return lambda r, c, s: commitforceeditor(r, c, s,
375 return lambda r, c, s: commitforceeditor(r, c, s,
375 finishdesc=finishdesc,
376 finishdesc=finishdesc,
376 extramsg=extramsg,
377 extramsg=extramsg,
377 editform=editform)
378 editform=editform)
378 elif editform:
379 elif editform:
379 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
380 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
380 else:
381 else:
381 return commiteditor
382 return commiteditor
382
383
383 def loglimit(opts):
384 def loglimit(opts):
384 """get the log limit according to option -l/--limit"""
385 """get the log limit according to option -l/--limit"""
385 limit = opts.get('limit')
386 limit = opts.get('limit')
386 if limit:
387 if limit:
387 try:
388 try:
388 limit = int(limit)
389 limit = int(limit)
389 except ValueError:
390 except ValueError:
390 raise error.Abort(_('limit must be a positive integer'))
391 raise error.Abort(_('limit must be a positive integer'))
391 if limit <= 0:
392 if limit <= 0:
392 raise error.Abort(_('limit must be positive'))
393 raise error.Abort(_('limit must be positive'))
393 else:
394 else:
394 limit = None
395 limit = None
395 return limit
396 return limit
396
397
397 def makefilename(repo, pat, node, desc=None,
398 def makefilename(repo, pat, node, desc=None,
398 total=None, seqno=None, revwidth=None, pathname=None):
399 total=None, seqno=None, revwidth=None, pathname=None):
399 node_expander = {
400 node_expander = {
400 'H': lambda: hex(node),
401 'H': lambda: hex(node),
401 'R': lambda: str(repo.changelog.rev(node)),
402 'R': lambda: str(repo.changelog.rev(node)),
402 'h': lambda: short(node),
403 'h': lambda: short(node),
403 'm': lambda: re.sub('[^\w]', '_', str(desc))
404 'm': lambda: re.sub('[^\w]', '_', str(desc))
404 }
405 }
405 expander = {
406 expander = {
406 '%': lambda: '%',
407 '%': lambda: '%',
407 'b': lambda: os.path.basename(repo.root),
408 'b': lambda: os.path.basename(repo.root),
408 }
409 }
409
410
410 try:
411 try:
411 if node:
412 if node:
412 expander.update(node_expander)
413 expander.update(node_expander)
413 if node:
414 if node:
414 expander['r'] = (lambda:
415 expander['r'] = (lambda:
415 str(repo.changelog.rev(node)).zfill(revwidth or 0))
416 str(repo.changelog.rev(node)).zfill(revwidth or 0))
416 if total is not None:
417 if total is not None:
417 expander['N'] = lambda: str(total)
418 expander['N'] = lambda: str(total)
418 if seqno is not None:
419 if seqno is not None:
419 expander['n'] = lambda: str(seqno)
420 expander['n'] = lambda: str(seqno)
420 if total is not None and seqno is not None:
421 if total is not None and seqno is not None:
421 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
422 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
422 if pathname is not None:
423 if pathname is not None:
423 expander['s'] = lambda: os.path.basename(pathname)
424 expander['s'] = lambda: os.path.basename(pathname)
424 expander['d'] = lambda: os.path.dirname(pathname) or '.'
425 expander['d'] = lambda: os.path.dirname(pathname) or '.'
425 expander['p'] = lambda: pathname
426 expander['p'] = lambda: pathname
426
427
427 newname = []
428 newname = []
428 patlen = len(pat)
429 patlen = len(pat)
429 i = 0
430 i = 0
430 while i < patlen:
431 while i < patlen:
431 c = pat[i]
432 c = pat[i]
432 if c == '%':
433 if c == '%':
433 i += 1
434 i += 1
434 c = pat[i]
435 c = pat[i]
435 c = expander[c]()
436 c = expander[c]()
436 newname.append(c)
437 newname.append(c)
437 i += 1
438 i += 1
438 return ''.join(newname)
439 return ''.join(newname)
439 except KeyError as inst:
440 except KeyError as inst:
440 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
441 raise error.Abort(_("invalid format spec '%%%s' in output filename") %
441 inst.args[0])
442 inst.args[0])
442
443
443 def makefileobj(repo, pat, node=None, desc=None, total=None,
444 def makefileobj(repo, pat, node=None, desc=None, total=None,
444 seqno=None, revwidth=None, mode='wb', modemap=None,
445 seqno=None, revwidth=None, mode='wb', modemap=None,
445 pathname=None):
446 pathname=None):
446
447
447 writable = mode not in ('r', 'rb')
448 writable = mode not in ('r', 'rb')
448
449
449 if not pat or pat == '-':
450 if not pat or pat == '-':
450 if writable:
451 if writable:
451 fp = repo.ui.fout
452 fp = repo.ui.fout
452 else:
453 else:
453 fp = repo.ui.fin
454 fp = repo.ui.fin
454 if util.safehasattr(fp, 'fileno'):
455 if util.safehasattr(fp, 'fileno'):
455 return os.fdopen(os.dup(fp.fileno()), mode)
456 return os.fdopen(os.dup(fp.fileno()), mode)
456 else:
457 else:
457 # if this fp can't be duped properly, return
458 # if this fp can't be duped properly, return
458 # a dummy object that can be closed
459 # a dummy object that can be closed
459 class wrappedfileobj(object):
460 class wrappedfileobj(object):
460 noop = lambda x: None
461 noop = lambda x: None
461 def __init__(self, f):
462 def __init__(self, f):
462 self.f = f
463 self.f = f
463 def __getattr__(self, attr):
464 def __getattr__(self, attr):
464 if attr == 'close':
465 if attr == 'close':
465 return self.noop
466 return self.noop
466 else:
467 else:
467 return getattr(self.f, attr)
468 return getattr(self.f, attr)
468
469
469 return wrappedfileobj(fp)
470 return wrappedfileobj(fp)
470 if util.safehasattr(pat, 'write') and writable:
471 if util.safehasattr(pat, 'write') and writable:
471 return pat
472 return pat
472 if util.safehasattr(pat, 'read') and 'r' in mode:
473 if util.safehasattr(pat, 'read') and 'r' in mode:
473 return pat
474 return pat
474 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
475 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
475 if modemap is not None:
476 if modemap is not None:
476 mode = modemap.get(fn, mode)
477 mode = modemap.get(fn, mode)
477 if mode == 'wb':
478 if mode == 'wb':
478 modemap[fn] = 'ab'
479 modemap[fn] = 'ab'
479 return open(fn, mode)
480 return open(fn, mode)
480
481
481 def openrevlog(repo, cmd, file_, opts):
482 def openrevlog(repo, cmd, file_, opts):
482 """opens the changelog, manifest, a filelog or a given revlog"""
483 """opens the changelog, manifest, a filelog or a given revlog"""
483 cl = opts['changelog']
484 cl = opts['changelog']
484 mf = opts['manifest']
485 mf = opts['manifest']
485 dir = opts['dir']
486 dir = opts['dir']
486 msg = None
487 msg = None
487 if cl and mf:
488 if cl and mf:
488 msg = _('cannot specify --changelog and --manifest at the same time')
489 msg = _('cannot specify --changelog and --manifest at the same time')
489 elif cl and dir:
490 elif cl and dir:
490 msg = _('cannot specify --changelog and --dir at the same time')
491 msg = _('cannot specify --changelog and --dir at the same time')
491 elif cl or mf:
492 elif cl or mf:
492 if file_:
493 if file_:
493 msg = _('cannot specify filename with --changelog or --manifest')
494 msg = _('cannot specify filename with --changelog or --manifest')
494 elif not repo:
495 elif not repo:
495 msg = _('cannot specify --changelog or --manifest or --dir '
496 msg = _('cannot specify --changelog or --manifest or --dir '
496 'without a repository')
497 'without a repository')
497 if msg:
498 if msg:
498 raise error.Abort(msg)
499 raise error.Abort(msg)
499
500
500 r = None
501 r = None
501 if repo:
502 if repo:
502 if cl:
503 if cl:
503 r = repo.unfiltered().changelog
504 r = repo.unfiltered().changelog
504 elif dir:
505 elif dir:
505 if 'treemanifest' not in repo.requirements:
506 if 'treemanifest' not in repo.requirements:
506 raise error.Abort(_("--dir can only be used on repos with "
507 raise error.Abort(_("--dir can only be used on repos with "
507 "treemanifest enabled"))
508 "treemanifest enabled"))
508 dirlog = repo.dirlog(file_)
509 dirlog = repo.dirlog(file_)
509 if len(dirlog):
510 if len(dirlog):
510 r = dirlog
511 r = dirlog
511 elif mf:
512 elif mf:
512 r = repo.manifest
513 r = repo.manifest
513 elif file_:
514 elif file_:
514 filelog = repo.file(file_)
515 filelog = repo.file(file_)
515 if len(filelog):
516 if len(filelog):
516 r = filelog
517 r = filelog
517 if not r:
518 if not r:
518 if not file_:
519 if not file_:
519 raise error.CommandError(cmd, _('invalid arguments'))
520 raise error.CommandError(cmd, _('invalid arguments'))
520 if not os.path.isfile(file_):
521 if not os.path.isfile(file_):
521 raise error.Abort(_("revlog '%s' not found") % file_)
522 raise error.Abort(_("revlog '%s' not found") % file_)
522 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
523 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
523 file_[:-2] + ".i")
524 file_[:-2] + ".i")
524 return r
525 return r
525
526
526 def copy(ui, repo, pats, opts, rename=False):
527 def copy(ui, repo, pats, opts, rename=False):
527 # called with the repo lock held
528 # called with the repo lock held
528 #
529 #
529 # hgsep => pathname that uses "/" to separate directories
530 # hgsep => pathname that uses "/" to separate directories
530 # ossep => pathname that uses os.sep to separate directories
531 # ossep => pathname that uses os.sep to separate directories
531 cwd = repo.getcwd()
532 cwd = repo.getcwd()
532 targets = {}
533 targets = {}
533 after = opts.get("after")
534 after = opts.get("after")
534 dryrun = opts.get("dry_run")
535 dryrun = opts.get("dry_run")
535 wctx = repo[None]
536 wctx = repo[None]
536
537
537 def walkpat(pat):
538 def walkpat(pat):
538 srcs = []
539 srcs = []
539 if after:
540 if after:
540 badstates = '?'
541 badstates = '?'
541 else:
542 else:
542 badstates = '?r'
543 badstates = '?r'
543 m = scmutil.match(repo[None], [pat], opts, globbed=True)
544 m = scmutil.match(repo[None], [pat], opts, globbed=True)
544 for abs in repo.walk(m):
545 for abs in repo.walk(m):
545 state = repo.dirstate[abs]
546 state = repo.dirstate[abs]
546 rel = m.rel(abs)
547 rel = m.rel(abs)
547 exact = m.exact(abs)
548 exact = m.exact(abs)
548 if state in badstates:
549 if state in badstates:
549 if exact and state == '?':
550 if exact and state == '?':
550 ui.warn(_('%s: not copying - file is not managed\n') % rel)
551 ui.warn(_('%s: not copying - file is not managed\n') % rel)
551 if exact and state == 'r':
552 if exact and state == 'r':
552 ui.warn(_('%s: not copying - file has been marked for'
553 ui.warn(_('%s: not copying - file has been marked for'
553 ' remove\n') % rel)
554 ' remove\n') % rel)
554 continue
555 continue
555 # abs: hgsep
556 # abs: hgsep
556 # rel: ossep
557 # rel: ossep
557 srcs.append((abs, rel, exact))
558 srcs.append((abs, rel, exact))
558 return srcs
559 return srcs
559
560
560 # abssrc: hgsep
561 # abssrc: hgsep
561 # relsrc: ossep
562 # relsrc: ossep
562 # otarget: ossep
563 # otarget: ossep
563 def copyfile(abssrc, relsrc, otarget, exact):
564 def copyfile(abssrc, relsrc, otarget, exact):
564 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
565 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
565 if '/' in abstarget:
566 if '/' in abstarget:
566 # We cannot normalize abstarget itself, this would prevent
567 # We cannot normalize abstarget itself, this would prevent
567 # case only renames, like a => A.
568 # case only renames, like a => A.
568 abspath, absname = abstarget.rsplit('/', 1)
569 abspath, absname = abstarget.rsplit('/', 1)
569 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
570 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
570 reltarget = repo.pathto(abstarget, cwd)
571 reltarget = repo.pathto(abstarget, cwd)
571 target = repo.wjoin(abstarget)
572 target = repo.wjoin(abstarget)
572 src = repo.wjoin(abssrc)
573 src = repo.wjoin(abssrc)
573 state = repo.dirstate[abstarget]
574 state = repo.dirstate[abstarget]
574
575
575 scmutil.checkportable(ui, abstarget)
576 scmutil.checkportable(ui, abstarget)
576
577
577 # check for collisions
578 # check for collisions
578 prevsrc = targets.get(abstarget)
579 prevsrc = targets.get(abstarget)
579 if prevsrc is not None:
580 if prevsrc is not None:
580 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
581 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
581 (reltarget, repo.pathto(abssrc, cwd),
582 (reltarget, repo.pathto(abssrc, cwd),
582 repo.pathto(prevsrc, cwd)))
583 repo.pathto(prevsrc, cwd)))
583 return
584 return
584
585
585 # check for overwrites
586 # check for overwrites
586 exists = os.path.lexists(target)
587 exists = os.path.lexists(target)
587 samefile = False
588 samefile = False
588 if exists and abssrc != abstarget:
589 if exists and abssrc != abstarget:
589 if (repo.dirstate.normalize(abssrc) ==
590 if (repo.dirstate.normalize(abssrc) ==
590 repo.dirstate.normalize(abstarget)):
591 repo.dirstate.normalize(abstarget)):
591 if not rename:
592 if not rename:
592 ui.warn(_("%s: can't copy - same file\n") % reltarget)
593 ui.warn(_("%s: can't copy - same file\n") % reltarget)
593 return
594 return
594 exists = False
595 exists = False
595 samefile = True
596 samefile = True
596
597
597 if not after and exists or after and state in 'mn':
598 if not after and exists or after and state in 'mn':
598 if not opts['force']:
599 if not opts['force']:
599 ui.warn(_('%s: not overwriting - file exists\n') %
600 ui.warn(_('%s: not overwriting - file exists\n') %
600 reltarget)
601 reltarget)
601 return
602 return
602
603
603 if after:
604 if after:
604 if not exists:
605 if not exists:
605 if rename:
606 if rename:
606 ui.warn(_('%s: not recording move - %s does not exist\n') %
607 ui.warn(_('%s: not recording move - %s does not exist\n') %
607 (relsrc, reltarget))
608 (relsrc, reltarget))
608 else:
609 else:
609 ui.warn(_('%s: not recording copy - %s does not exist\n') %
610 ui.warn(_('%s: not recording copy - %s does not exist\n') %
610 (relsrc, reltarget))
611 (relsrc, reltarget))
611 return
612 return
612 elif not dryrun:
613 elif not dryrun:
613 try:
614 try:
614 if exists:
615 if exists:
615 os.unlink(target)
616 os.unlink(target)
616 targetdir = os.path.dirname(target) or '.'
617 targetdir = os.path.dirname(target) or '.'
617 if not os.path.isdir(targetdir):
618 if not os.path.isdir(targetdir):
618 os.makedirs(targetdir)
619 os.makedirs(targetdir)
619 if samefile:
620 if samefile:
620 tmp = target + "~hgrename"
621 tmp = target + "~hgrename"
621 os.rename(src, tmp)
622 os.rename(src, tmp)
622 os.rename(tmp, target)
623 os.rename(tmp, target)
623 else:
624 else:
624 util.copyfile(src, target)
625 util.copyfile(src, target)
625 srcexists = True
626 srcexists = True
626 except IOError as inst:
627 except IOError as inst:
627 if inst.errno == errno.ENOENT:
628 if inst.errno == errno.ENOENT:
628 ui.warn(_('%s: deleted in working directory\n') % relsrc)
629 ui.warn(_('%s: deleted in working directory\n') % relsrc)
629 srcexists = False
630 srcexists = False
630 else:
631 else:
631 ui.warn(_('%s: cannot copy - %s\n') %
632 ui.warn(_('%s: cannot copy - %s\n') %
632 (relsrc, inst.strerror))
633 (relsrc, inst.strerror))
633 return True # report a failure
634 return True # report a failure
634
635
635 if ui.verbose or not exact:
636 if ui.verbose or not exact:
636 if rename:
637 if rename:
637 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
638 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
638 else:
639 else:
639 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
640 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
640
641
641 targets[abstarget] = abssrc
642 targets[abstarget] = abssrc
642
643
643 # fix up dirstate
644 # fix up dirstate
644 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
645 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
645 dryrun=dryrun, cwd=cwd)
646 dryrun=dryrun, cwd=cwd)
646 if rename and not dryrun:
647 if rename and not dryrun:
647 if not after and srcexists and not samefile:
648 if not after and srcexists and not samefile:
648 util.unlinkpath(repo.wjoin(abssrc))
649 util.unlinkpath(repo.wjoin(abssrc))
649 wctx.forget([abssrc])
650 wctx.forget([abssrc])
650
651
651 # pat: ossep
652 # pat: ossep
652 # dest ossep
653 # dest ossep
653 # srcs: list of (hgsep, hgsep, ossep, bool)
654 # srcs: list of (hgsep, hgsep, ossep, bool)
654 # return: function that takes hgsep and returns ossep
655 # return: function that takes hgsep and returns ossep
655 def targetpathfn(pat, dest, srcs):
656 def targetpathfn(pat, dest, srcs):
656 if os.path.isdir(pat):
657 if os.path.isdir(pat):
657 abspfx = pathutil.canonpath(repo.root, cwd, pat)
658 abspfx = pathutil.canonpath(repo.root, cwd, pat)
658 abspfx = util.localpath(abspfx)
659 abspfx = util.localpath(abspfx)
659 if destdirexists:
660 if destdirexists:
660 striplen = len(os.path.split(abspfx)[0])
661 striplen = len(os.path.split(abspfx)[0])
661 else:
662 else:
662 striplen = len(abspfx)
663 striplen = len(abspfx)
663 if striplen:
664 if striplen:
664 striplen += len(os.sep)
665 striplen += len(os.sep)
665 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
666 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
666 elif destdirexists:
667 elif destdirexists:
667 res = lambda p: os.path.join(dest,
668 res = lambda p: os.path.join(dest,
668 os.path.basename(util.localpath(p)))
669 os.path.basename(util.localpath(p)))
669 else:
670 else:
670 res = lambda p: dest
671 res = lambda p: dest
671 return res
672 return res
672
673
673 # pat: ossep
674 # pat: ossep
674 # dest ossep
675 # dest ossep
675 # srcs: list of (hgsep, hgsep, ossep, bool)
676 # srcs: list of (hgsep, hgsep, ossep, bool)
676 # return: function that takes hgsep and returns ossep
677 # return: function that takes hgsep and returns ossep
677 def targetpathafterfn(pat, dest, srcs):
678 def targetpathafterfn(pat, dest, srcs):
678 if matchmod.patkind(pat):
679 if matchmod.patkind(pat):
679 # a mercurial pattern
680 # a mercurial pattern
680 res = lambda p: os.path.join(dest,
681 res = lambda p: os.path.join(dest,
681 os.path.basename(util.localpath(p)))
682 os.path.basename(util.localpath(p)))
682 else:
683 else:
683 abspfx = pathutil.canonpath(repo.root, cwd, pat)
684 abspfx = pathutil.canonpath(repo.root, cwd, pat)
684 if len(abspfx) < len(srcs[0][0]):
685 if len(abspfx) < len(srcs[0][0]):
685 # A directory. Either the target path contains the last
686 # A directory. Either the target path contains the last
686 # component of the source path or it does not.
687 # component of the source path or it does not.
687 def evalpath(striplen):
688 def evalpath(striplen):
688 score = 0
689 score = 0
689 for s in srcs:
690 for s in srcs:
690 t = os.path.join(dest, util.localpath(s[0])[striplen:])
691 t = os.path.join(dest, util.localpath(s[0])[striplen:])
691 if os.path.lexists(t):
692 if os.path.lexists(t):
692 score += 1
693 score += 1
693 return score
694 return score
694
695
695 abspfx = util.localpath(abspfx)
696 abspfx = util.localpath(abspfx)
696 striplen = len(abspfx)
697 striplen = len(abspfx)
697 if striplen:
698 if striplen:
698 striplen += len(os.sep)
699 striplen += len(os.sep)
699 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
700 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
700 score = evalpath(striplen)
701 score = evalpath(striplen)
701 striplen1 = len(os.path.split(abspfx)[0])
702 striplen1 = len(os.path.split(abspfx)[0])
702 if striplen1:
703 if striplen1:
703 striplen1 += len(os.sep)
704 striplen1 += len(os.sep)
704 if evalpath(striplen1) > score:
705 if evalpath(striplen1) > score:
705 striplen = striplen1
706 striplen = striplen1
706 res = lambda p: os.path.join(dest,
707 res = lambda p: os.path.join(dest,
707 util.localpath(p)[striplen:])
708 util.localpath(p)[striplen:])
708 else:
709 else:
709 # a file
710 # a file
710 if destdirexists:
711 if destdirexists:
711 res = lambda p: os.path.join(dest,
712 res = lambda p: os.path.join(dest,
712 os.path.basename(util.localpath(p)))
713 os.path.basename(util.localpath(p)))
713 else:
714 else:
714 res = lambda p: dest
715 res = lambda p: dest
715 return res
716 return res
716
717
717 pats = scmutil.expandpats(pats)
718 pats = scmutil.expandpats(pats)
718 if not pats:
719 if not pats:
719 raise error.Abort(_('no source or destination specified'))
720 raise error.Abort(_('no source or destination specified'))
720 if len(pats) == 1:
721 if len(pats) == 1:
721 raise error.Abort(_('no destination specified'))
722 raise error.Abort(_('no destination specified'))
722 dest = pats.pop()
723 dest = pats.pop()
723 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
724 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
724 if not destdirexists:
725 if not destdirexists:
725 if len(pats) > 1 or matchmod.patkind(pats[0]):
726 if len(pats) > 1 or matchmod.patkind(pats[0]):
726 raise error.Abort(_('with multiple sources, destination must be an '
727 raise error.Abort(_('with multiple sources, destination must be an '
727 'existing directory'))
728 'existing directory'))
728 if util.endswithsep(dest):
729 if util.endswithsep(dest):
729 raise error.Abort(_('destination %s is not a directory') % dest)
730 raise error.Abort(_('destination %s is not a directory') % dest)
730
731
731 tfn = targetpathfn
732 tfn = targetpathfn
732 if after:
733 if after:
733 tfn = targetpathafterfn
734 tfn = targetpathafterfn
734 copylist = []
735 copylist = []
735 for pat in pats:
736 for pat in pats:
736 srcs = walkpat(pat)
737 srcs = walkpat(pat)
737 if not srcs:
738 if not srcs:
738 continue
739 continue
739 copylist.append((tfn(pat, dest, srcs), srcs))
740 copylist.append((tfn(pat, dest, srcs), srcs))
740 if not copylist:
741 if not copylist:
741 raise error.Abort(_('no files to copy'))
742 raise error.Abort(_('no files to copy'))
742
743
743 errors = 0
744 errors = 0
744 for targetpath, srcs in copylist:
745 for targetpath, srcs in copylist:
745 for abssrc, relsrc, exact in srcs:
746 for abssrc, relsrc, exact in srcs:
746 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
747 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
747 errors += 1
748 errors += 1
748
749
749 if errors:
750 if errors:
750 ui.warn(_('(consider using --after)\n'))
751 ui.warn(_('(consider using --after)\n'))
751
752
752 return errors != 0
753 return errors != 0
753
754
754 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
755 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
755 runargs=None, appendpid=False):
756 runargs=None, appendpid=False):
756 '''Run a command as a service.'''
757 '''Run a command as a service.'''
757
758
758 def writepid(pid):
759 def writepid(pid):
759 if opts['pid_file']:
760 if opts['pid_file']:
760 if appendpid:
761 if appendpid:
761 mode = 'a'
762 mode = 'a'
762 else:
763 else:
763 mode = 'w'
764 mode = 'w'
764 fp = open(opts['pid_file'], mode)
765 fp = open(opts['pid_file'], mode)
765 fp.write(str(pid) + '\n')
766 fp.write(str(pid) + '\n')
766 fp.close()
767 fp.close()
767
768
768 if opts['daemon'] and not opts['daemon_pipefds']:
769 if opts['daemon'] and not opts['daemon_pipefds']:
769 # Signal child process startup with file removal
770 # Signal child process startup with file removal
770 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
771 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
771 os.close(lockfd)
772 os.close(lockfd)
772 try:
773 try:
773 if not runargs:
774 if not runargs:
774 runargs = util.hgcmd() + sys.argv[1:]
775 runargs = util.hgcmd() + sys.argv[1:]
775 runargs.append('--daemon-pipefds=%s' % lockpath)
776 runargs.append('--daemon-pipefds=%s' % lockpath)
776 # Don't pass --cwd to the child process, because we've already
777 # Don't pass --cwd to the child process, because we've already
777 # changed directory.
778 # changed directory.
778 for i in xrange(1, len(runargs)):
779 for i in xrange(1, len(runargs)):
779 if runargs[i].startswith('--cwd='):
780 if runargs[i].startswith('--cwd='):
780 del runargs[i]
781 del runargs[i]
781 break
782 break
782 elif runargs[i].startswith('--cwd'):
783 elif runargs[i].startswith('--cwd'):
783 del runargs[i:i + 2]
784 del runargs[i:i + 2]
784 break
785 break
785 def condfn():
786 def condfn():
786 return not os.path.exists(lockpath)
787 return not os.path.exists(lockpath)
787 pid = util.rundetached(runargs, condfn)
788 pid = util.rundetached(runargs, condfn)
788 if pid < 0:
789 if pid < 0:
789 raise error.Abort(_('child process failed to start'))
790 raise error.Abort(_('child process failed to start'))
790 writepid(pid)
791 writepid(pid)
791 finally:
792 finally:
792 try:
793 try:
793 os.unlink(lockpath)
794 os.unlink(lockpath)
794 except OSError as e:
795 except OSError as e:
795 if e.errno != errno.ENOENT:
796 if e.errno != errno.ENOENT:
796 raise
797 raise
797 if parentfn:
798 if parentfn:
798 return parentfn(pid)
799 return parentfn(pid)
799 else:
800 else:
800 return
801 return
801
802
802 if initfn:
803 if initfn:
803 initfn()
804 initfn()
804
805
805 if not opts['daemon']:
806 if not opts['daemon']:
806 writepid(os.getpid())
807 writepid(os.getpid())
807
808
808 if opts['daemon_pipefds']:
809 if opts['daemon_pipefds']:
809 lockpath = opts['daemon_pipefds']
810 lockpath = opts['daemon_pipefds']
810 try:
811 try:
811 os.setsid()
812 os.setsid()
812 except AttributeError:
813 except AttributeError:
813 pass
814 pass
814 os.unlink(lockpath)
815 os.unlink(lockpath)
815 util.hidewindow()
816 util.hidewindow()
816 sys.stdout.flush()
817 sys.stdout.flush()
817 sys.stderr.flush()
818 sys.stderr.flush()
818
819
819 nullfd = os.open(os.devnull, os.O_RDWR)
820 nullfd = os.open(os.devnull, os.O_RDWR)
820 logfilefd = nullfd
821 logfilefd = nullfd
821 if logfile:
822 if logfile:
822 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
823 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
823 os.dup2(nullfd, 0)
824 os.dup2(nullfd, 0)
824 os.dup2(logfilefd, 1)
825 os.dup2(logfilefd, 1)
825 os.dup2(logfilefd, 2)
826 os.dup2(logfilefd, 2)
826 if nullfd not in (0, 1, 2):
827 if nullfd not in (0, 1, 2):
827 os.close(nullfd)
828 os.close(nullfd)
828 if logfile and logfilefd not in (0, 1, 2):
829 if logfile and logfilefd not in (0, 1, 2):
829 os.close(logfilefd)
830 os.close(logfilefd)
830
831
831 if runfn:
832 if runfn:
832 return runfn()
833 return runfn()
833
834
834 ## facility to let extension process additional data into an import patch
835 ## facility to let extension process additional data into an import patch
835 # list of identifier to be executed in order
836 # list of identifier to be executed in order
836 extrapreimport = [] # run before commit
837 extrapreimport = [] # run before commit
837 extrapostimport = [] # run after commit
838 extrapostimport = [] # run after commit
838 # mapping from identifier to actual import function
839 # mapping from identifier to actual import function
839 #
840 #
840 # 'preimport' are run before the commit is made and are provided the following
841 # 'preimport' are run before the commit is made and are provided the following
841 # arguments:
842 # arguments:
842 # - repo: the localrepository instance,
843 # - repo: the localrepository instance,
843 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
844 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
844 # - extra: the future extra dictionary of the changeset, please mutate it,
845 # - extra: the future extra dictionary of the changeset, please mutate it,
845 # - opts: the import options.
846 # - opts: the import options.
846 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
847 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
847 # mutation of in memory commit and more. Feel free to rework the code to get
848 # mutation of in memory commit and more. Feel free to rework the code to get
848 # there.
849 # there.
849 extrapreimportmap = {}
850 extrapreimportmap = {}
850 # 'postimport' are run after the commit is made and are provided the following
851 # 'postimport' are run after the commit is made and are provided the following
851 # argument:
852 # argument:
852 # - ctx: the changectx created by import.
853 # - ctx: the changectx created by import.
853 extrapostimportmap = {}
854 extrapostimportmap = {}
854
855
855 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
856 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
856 """Utility function used by commands.import to import a single patch
857 """Utility function used by commands.import to import a single patch
857
858
858 This function is explicitly defined here to help the evolve extension to
859 This function is explicitly defined here to help the evolve extension to
859 wrap this part of the import logic.
860 wrap this part of the import logic.
860
861
861 The API is currently a bit ugly because it a simple code translation from
862 The API is currently a bit ugly because it a simple code translation from
862 the import command. Feel free to make it better.
863 the import command. Feel free to make it better.
863
864
864 :hunk: a patch (as a binary string)
865 :hunk: a patch (as a binary string)
865 :parents: nodes that will be parent of the created commit
866 :parents: nodes that will be parent of the created commit
866 :opts: the full dict of option passed to the import command
867 :opts: the full dict of option passed to the import command
867 :msgs: list to save commit message to.
868 :msgs: list to save commit message to.
    (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
        updatefunc(<repo>, <node>)
    """
    # avoid cycle context -> subrepo -> cmdutil
    import context
    extractdata = patch.extract(ui, hunk)
    tmpname = extractdata.get('filename')
    message = extractdata.get('message')
    user = extractdata.get('user')
    date = extractdata.get('date')
    branch = extractdata.get('branch')
    nodeid = extractdata.get('nodeid')
    p1 = extractdata.get('p1')
    p2 = extractdata.get('p2')

    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pick up the cmdline msg
            message = cmdline_message
        elif message:
            # pick up the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            if not nodeid or not p1:
                raise error.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                p1, p2 = parents
            if p2.node() == nullid:
                ui.warn(_("warning: import the patch as a normal revision\n"
                          "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError as e:
                if not partial:
                    raise error.Abort(str(e))
                if partial:
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
                extra = {}
                for idfunc in extrapreimport:
                    extrapreimportmap[idfunc](repo, extractdata, extra, opts)
                try:
                    if partial:
                        repo.ui.setconfig('ui', 'allowemptycommit', True)
                    n = repo.commit(message, opts.get('user') or user,
                                    opts.get('date') or date, match=m,
                                    editor=editor, extra=extra)
                    for idfunc in extrapostimport:
                        extrapostimportmap[idfunc](repo[n])
                finally:
                    repo.ui.restoreconfig(allowemptyback)
        else:
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError as e:
                    raise error.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise error.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)

# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# the function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}

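# Example (hypothetical extension code, not shipped with Mercurial): an
# extension that wants an extra "Topic" header in exported patches could
# register an identifier and a callback following the contract above:
#
#     from mercurial import cmdutil
#
#     def topicheader(seqno, ctx):
#         # return a header line (without the leading "# ") or None
#         topic = ctx.extra().get('topic')   # 'topic' is a made-up extra key
#         return 'Topic %s' % topic if topic else None
#
#     cmdutil.extraexport.append('topic')
#     cmdutil.extraexportmap['topic'] = topicheader
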
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None, match=None):
    '''export changesets as hg patches.'''

    total = len(revs)
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] # Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                fp.write(s)

        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))

        for headerid in extraexport:
            header = extraexportmap[headerid](seqno, ctx)
            if header is not None:
                write('# %s\n' % header)
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, match, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)

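# Example (illustrative sketch of a hypothetical caller, roughly what
# 'hg export' does for a single revision): write one hg-<shorthash>.patch
# file for the working directory parent, using the default name template:
#
#     cmdutil.export(repo, [repo['.'].rev()],
#                    template='hg-%h.patch',
#                    opts=patch.diffallopts(repo.ui))
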
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.'''
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                         changes, diffopts, prefix=prefix,
                                         relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)

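# Example (illustrative sketch; 'ui' and 'repo' stand in for a real ui and
# repository object): print a diffstat between the working directory parent
# and its parent, mirroring the call pattern used by showpatch() below:
#
#     ctx = repo['.']
#     diffordiffstat(ui, repo, patch.diffallopts(ui), ctx.p1().node(),
#                    ctx.node(), scmutil.matchall(repo), stat=True)
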
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, ctx):
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        # as of now, wctx.node() and wctx.rev() return None, but we want to
        # show the same values as {node} and {rev} templatekw
        revnode = (scmutil.intrev(rev), hexfunc(bin(ctx.hex())))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(ctx, matchfn)

    def showpatch(self, ctx, matchfn):
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            node = ctx.node()
            prev = ctx.p1()
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

class jsonchangeset(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        self._first = True

    def close(self):
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n  "rev": %s' % jrev)
            self.ui.write(',\n  "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n  "rev": %s' % jrev)
        self.ui.write(',\n  "node": %s' % jnode)
        self.ui.write(',\n  "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n  "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n  "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n  "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n  "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n  "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n  "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n  "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n  "manifest": %s' % jmanifestnode)

            self.ui.write(',\n  "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write(',\n  "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n  "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n  "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n  "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n  "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n  "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n  "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")

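# Example (illustrative) of the document shape the writes above produce for
# one changeset in the default (non-quiet, non-verbose) mode; values are
# made up:
#
#     [
#      {
#       "rev": 0,
#       "node": "4d1f2871e0c3...",   # 40-character hex node
#       "branch": "default",
#       "phase": "draft",
#       "user": "alice <alice@example.com>",
#       "date": [1445000000, 0],
#       "desc": "initial commit",
#       "bookmarks": [],
#       "tags": ["tip"],
#       "parents": ["0000000000000000000000000000000000000000"]
#      }
#     ]
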
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            self.t.cache['changeset'] = tmpl

        self.cache = {}

        # find correct templates for current mode
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        self._parts = {'header': '', 'footer': '', 'changeset': 'changeset',
                       'docheader': '', 'docfooter': ''}
        for mode, postfix in tmplmodes:
            for t in self._parts:
                cur = t
                if postfix:
                    cur += "_" + postfix
                if mode and cur in self.t:
                    self._parts[t] = cur

        if self._parts['docheader']:
            self.ui.write(templater.stringify(self.t(self._parts['docheader'])))

    def close(self):
        if self._parts['docfooter']:
            if not self.footer:
                self.footer = ""
            self.footer += templater.stringify(self.t(self._parts['docfooter']))
        return super(changeset_templater, self).close()

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props.update(templatekw.keywords)
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        try:
            # write header
            if self._parts['header']:
                h = templater.stringify(self.t(self._parts['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = self._parts['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx, matchfn)

            if self._parts['footer']:
                if not self.footer:
                    self.footer = templater.stringify(
                        self.t(self._parts['footer'], **props))
        except KeyError as inst:
            msg = _("%s: no key named '%s'")
            raise error.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError as inst:
            raise error.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))

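# Example (illustrative minimal style map file; the _verbose/_quiet/_debug
# suffixes correspond to the 'postfix' values tried in tmplmodes above):
#
#     changeset = '{rev}:{node|short} {desc|firstline}\n'
#     changeset_verbose = '{rev}:{node|short} {author}\n{desc}\n'
#     changeset_quiet = '{rev}:{node|short}\n'
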
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.
    """

    # ui settings
    if not tmpl and not style: # templates are stronger than styles
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.unquotestring(tmpl)
            except SyntaxError:
                pass
            return tmpl, None
        else:
            style = util.expandpath(ui.config('ui', 'style', ''))

    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            mapname = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if mapname:
                mapfile = mapname
        return None, mapfile

    if not tmpl:
        return None, None

    return formatter.lookuptemplate(ui, 'changeset', tmpl)

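# Example (illustrative hgrc snippets): either of these settings would be
# picked up by gettemplate() above when no --template/--style is given:
#
#     [ui]
#     logtemplate = {rev}:{node|short} {desc|firstline}\n
#
#     [ui]
#     style = compact
#
# 'compact' resolves to the shipped map-cmdline.compact style file via
# templater.templatepath().
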
def show_changeset(ui, repo, opts, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either unset or the empty string,
    regular display via changeset_printer() is done.
    """
    # options
    matchfn = None
    if opts.get('patch') or opts.get('stat'):
        matchfn = scmutil.matchall(repo)

    if opts.get('template') == 'json':
        return jsonchangeset(ui, repo, matchfn, opts, buffered)

    tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))

    if not tmpl and not mapfile:
        return changeset_printer(ui, repo, matchfn, opts, buffered)

    try:
        t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
                                buffered)
    except SyntaxError as inst:
        raise error.Abort(inst.args[0])
    return t

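# Example (illustrative sketch of how a log-like command typically drives
# the displayer returned above):
#
#     displayer = show_changeset(ui, repo, opts, buffered=False)
#     for rev in repo.revs('last(all(), 5)'):
#         displayer.show(repo[rev])
#     displayer.close()
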
def showmarker(ui, marker):
    """utility function to display an obsolescence marker in a readable way

    To be used by debug functions."""
    ui.write(hex(marker.precnode()))
    for repl in marker.succnodes():
        ui.write(' ')
        ui.write(hex(repl))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    ui.write('{%s}' % (', '.join('%r: %r' % t for t in
                                 sorted(marker.metadata().items())
                                 if t[0] != 'date')))
    ui.write('\n')

def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    df = util.matchdate(date)
    m = scmutil.matchall(repo)
    results = {}

    def prep(ctx, fns):
        d = ctx.date()
        if df(d[0]):
            results[ctx.rev()] = d

    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        rev = ctx.rev()
        if rev in results:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(results[rev])))
            return str(rev)

    raise error.Abort(_("revision matching date not found"))

def increasingwindows(windowsize=8, sizelimit=512):
    while True:
        yield windowsize
        if windowsize < sizelimit:
            windowsize *= 2

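# Illustration: with the defaults, the generator above yields window sizes
# 8, 16, 32, 64, 128, 256, 512, 512, 512, ... so history is scanned in
# progressively larger batches, capped at 512 revisions per window.
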
class FileWalkError(Exception):
    pass

def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield revs for which we have the changelog; it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted

class _followfilter(object):
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False

def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

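    # Example (illustrative sketch): collecting the revisions that touch a
    # given file, mirroring the prepare/iterate pattern used by finddate()
    # above ('README' and the 'hits' list are just placeholders):
    #
    #     m = scmutil.match(repo[None], ['README'])
    #     hits = []
    #     def prep(ctx, fns):
    #         hits.append(ctx.rev())
    #     for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
    #         pass
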
    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

1889
1889 return iterate()
1890 return iterate()
1890
1891
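# Illustrative sketch only (not part of upstream cmdutil): callers consume
# walkchangerevs() with a `prepare` callback that is invoked once per wanted
# revision before the changeset is yielded again for display, roughly:
#
#     def prepare(ctx, fns):
#         for f in fns:
#             ui.note('%s touched %s\n' % (ctx, f))
#     for ctx in walkchangerevs(repo, match, opts, prepare):
#         displayer.show(ctx)
#         displayer.flush(ctx)
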
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher

def _makenofollowlogfilematcher(repo, pats, opts):
    '''hook for extensions to override the filematcher for non-follow cases'''
    return None

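# Illustrative sketch only, assuming a third-party extension: the hook above
# can be overridden with extensions.wrapfunction(); the wrapper and matcher
# below are hypothetical.
#
#     from mercurial import cmdutil, extensions, scmutil
#
#     def _mymatcher(orig, repo, pats, opts):
#         m = orig(repo, pats, opts)
#         return m or scmutil.matchall(repo)
#
#     def extsetup(ui):
#         extensions.wrapfunction(cmdutil, '_makenofollowlogfilematcher',
#                                 _mymatcher)
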
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match object filtering
    the files to be detailed when displaying the revision.
    """
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    it = iter(revs)
    startrev = it.next()
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher

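# Worked example (illustration only, values hypothetical): with
# opts = {'keyword': ['bug'], 'no_merges': True, 'user': ['alice', 'bob']},
# the loop above expands the opt2revset templates and joins them with 'and',
# producing roughly:
#
#     "((keyword('bug')) and not merge() and (user('alice') or user('bob')))"
#
# Single values are substituted directly; list values are first joined with
# the per-option connective (' or ' / ' and ') and wrapped in parentheses.
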
def _logrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = revset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = revset.spanset(repo)
        revs.reverse()
    return revs

def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

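# Illustrative sketch only (not part of upstream cmdutil): a log-style command
# typically stitches the three return values together like this.
#
#     revs, expr, filematcher = getlogrevs(repo, pats, opts)
#     displayer = show_changeset(ui, repo, opts, buffered=True)
#     for rev in revs:
#         ctx = repo[rev]
#         matchfn = filematcher(rev) if filematcher else None
#         displayer.show(ctx, matchfn=matchfn)
#         displayer.flush(ctx)
#     displayer.close()
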
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()

def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)

def checkunsupportedgraphflags(pats, opts):
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % op.replace("_", "-"))

def graphrevs(repo, nodes, opts):
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, sorted(wctx.substate),
                                  True, False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def forget(ui, repo, match, prefix, explicitonly):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def files(ui, ctx, m, fm, fmt, subrepos):
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            return (m.always() or m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret

def cat(ui, repo, ctx, matcher, prefix, **opts):
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        matcher.bad(path, msg)

    for abs in ctx.walk(matchmod.badmatch(matcher, badfn)):
        write(abs)
        err = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err

def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise error.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)

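# Illustrative sketch only (the callback is hypothetical): commit() expects a
# `commitfunc` with the signature used above; the plain-commit case is
# essentially a thin wrapper around repo.commit().
#
#     def commitfunc(ui, repo, message, match, opts):
#         return repo.commit(message, opts.get('user'), opts.get('date'),
#                            match, editor=getcommiteditor(**opts))
#
#     node = commit(ui, repo, commitfunc, pats, opts)
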
2494 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2495 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2495 # avoid cycle context -> subrepo -> cmdutil
2496 # avoid cycle context -> subrepo -> cmdutil
2496 import context
2497 import context
2497
2498
2498 # amend will reuse the existing user if not specified, but the obsolete
2499 # amend will reuse the existing user if not specified, but the obsolete
2499 # marker creation requires that the current user's name is specified.
2500 # marker creation requires that the current user's name is specified.
2500 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2501 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2501 ui.username() # raise exception if username not set
2502 ui.username() # raise exception if username not set
2502
2503
2503 ui.note(_('amending changeset %s\n') % old)
2504 ui.note(_('amending changeset %s\n') % old)
2504 base = old.p1()
2505 base = old.p1()
2505 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2506 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2506
2507
2507 wlock = lock = newid = None
2508 wlock = lock = newid = None
2508 try:
2509 try:
2509 wlock = repo.wlock()
2510 wlock = repo.wlock()
2510 lock = repo.lock()
2511 lock = repo.lock()
2511 tr = repo.transaction('amend')
2512 tr = repo.transaction('amend')
2512 try:
2513 try:
2513 # See if we got a message from -m or -l, if not, open the editor
2514 # See if we got a message from -m or -l, if not, open the editor
2514 # with the message of the changeset to amend
2515 # with the message of the changeset to amend
2515 message = logmessage(ui, opts)
2516 message = logmessage(ui, opts)
2516 # ensure logfile does not conflict with later enforcement of the
2517 # ensure logfile does not conflict with later enforcement of the
2517 # message. potential logfile content has been processed by
2518 # message. potential logfile content has been processed by
2518 # `logmessage` anyway.
2519 # `logmessage` anyway.
2519 opts.pop('logfile')
2520 opts.pop('logfile')
2520 # First, do a regular commit to record all changes in the working
2521 # First, do a regular commit to record all changes in the working
2521 # directory (if there are any)
2522 # directory (if there are any)
2522 ui.callhooks = False
2523 ui.callhooks = False
2523 activebookmark = repo._activebookmark
2524 activebookmark = repo._activebookmark
2524 try:
2525 try:
2525 repo._activebookmark = None
2526 repo._activebookmark = None
2526 opts['message'] = 'temporary amend commit for %s' % old
2527 opts['message'] = 'temporary amend commit for %s' % old
2527 node = commit(ui, repo, commitfunc, pats, opts)
2528 node = commit(ui, repo, commitfunc, pats, opts)
2528 finally:
2529 finally:
2529 repo._activebookmark = activebookmark
2530 repo._activebookmark = activebookmark
2530 ui.callhooks = True
2531 ui.callhooks = True
2531 ctx = repo[node]
2532 ctx = repo[node]
2532
2533
2533 # Participating changesets:
2534 # Participating changesets:
2534 #
2535 #
2535 # node/ctx o - new (intermediate) commit that contains changes
2536 # node/ctx o - new (intermediate) commit that contains changes
2536 # | from working dir to go into amending commit
2537 # | from working dir to go into amending commit
2537 # | (or a workingctx if there were no changes)
2538 # | (or a workingctx if there were no changes)
2538 # |
2539 # |
2539 # old o - changeset to amend
2540 # old o - changeset to amend
2540 # |
2541 # |
2541 # base o - parent of amending changeset
2542 # base o - parent of amending changeset
2542
2543
2543 # Update extra dict from amended commit (e.g. to preserve graft
2544 # Update extra dict from amended commit (e.g. to preserve graft
2544 # source)
2545 # source)
2545 extra.update(old.extra())
2546 extra.update(old.extra())
2546
2547
2547 # Also update it from the intermediate commit or from the wctx
2548 # Also update it from the intermediate commit or from the wctx
2548 extra.update(ctx.extra())
2549 extra.update(ctx.extra())
2549
2550
2550 if len(old.parents()) > 1:
2551 if len(old.parents()) > 1:
2551 # ctx.files() isn't reliable for merges, so fall back to the
2552 # ctx.files() isn't reliable for merges, so fall back to the
2552 # slower repo.status() method
2553 # slower repo.status() method
2553 files = set([fn for st in repo.status(base, old)[:3]
2554 files = set([fn for st in repo.status(base, old)[:3]
2554 for fn in st])
2555 for fn in st])
2555 else:
2556 else:
2556 files = set(old.files())
2557 files = set(old.files())
2557
2558
2558 # Second, we use either the commit we just did, or if there were no
2559 # Second, we use either the commit we just did, or if there were no
2559 # changes the parent of the working directory as the version of the
2560 # changes the parent of the working directory as the version of the
2560 # files in the final amend commit
2561 # files in the final amend commit
2561 if node:
2562 if node:
2562 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2563 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2563
2564
2564 user = ctx.user()
2565 user = ctx.user()
2565 date = ctx.date()
2566 date = ctx.date()
2566 # Recompute copies (avoid recording a -> b -> a)
2567 # Recompute copies (avoid recording a -> b -> a)
2567 copied = copies.pathcopies(base, ctx)
2568 copied = copies.pathcopies(base, ctx)
2568 if old.p2:
2569 if old.p2:
2569 copied.update(copies.pathcopies(old.p2(), ctx))
2570 copied.update(copies.pathcopies(old.p2(), ctx))
2570
2571
2571 # Prune files which were reverted by the updates: if old
2572 # Prune files which were reverted by the updates: if old
2572 # introduced file X and our intermediate commit, node,
2573 # introduced file X and our intermediate commit, node,
2573 # renamed that file, then those two files are the same and
2574 # renamed that file, then those two files are the same and
2574 # we can discard X from our list of files. Likewise if X
2575 # we can discard X from our list of files. Likewise if X
2575 # was deleted, it's no longer relevant
2576 # was deleted, it's no longer relevant
2576 files.update(ctx.files())
2577 files.update(ctx.files())
2577
2578
2578 def samefile(f):
2579 def samefile(f):
2579 if f in ctx.manifest():
2580 if f in ctx.manifest():
2580 a = ctx.filectx(f)
2581 a = ctx.filectx(f)
2581 if f in base.manifest():
2582 if f in base.manifest():
2582 b = base.filectx(f)
2583 b = base.filectx(f)
2583 return (not a.cmp(b)
2584 return (not a.cmp(b)
2584 and a.flags() == b.flags())
2585 and a.flags() == b.flags())
2585 else:
2586 else:
2586 return False
2587 return False
2587 else:
2588 else:
2588 return f not in base.manifest()
2589 return f not in base.manifest()
2589 files = [f for f in files if not samefile(f)]
2590 files = [f for f in files if not samefile(f)]
2590
2591
2591 def filectxfn(repo, ctx_, path):
2592 def filectxfn(repo, ctx_, path):
2592 try:
2593 try:
2593 fctx = ctx[path]
2594 fctx = ctx[path]
2594 flags = fctx.flags()
2595 flags = fctx.flags()
2595 mctx = context.memfilectx(repo,
2596 mctx = context.memfilectx(repo,
2596 fctx.path(), fctx.data(),
2597 fctx.path(), fctx.data(),
2597 islink='l' in flags,
2598 islink='l' in flags,
2598 isexec='x' in flags,
2599 isexec='x' in flags,
2599 copied=copied.get(path))
2600 copied=copied.get(path))
2600 return mctx
2601 return mctx
2601 except KeyError:
2602 except KeyError:
2602 return None
2603 return None
2603 else:
2604 else:
2604 ui.note(_('copying changeset %s to %s\n') % (old, base))
2605 ui.note(_('copying changeset %s to %s\n') % (old, base))
2605
2606
2606 # Use version of files as in the old cset
2607 # Use version of files as in the old cset
2607 def filectxfn(repo, ctx_, path):
2608 def filectxfn(repo, ctx_, path):
2608 try:
2609 try:
2609 return old.filectx(path)
2610 return old.filectx(path)
2610 except KeyError:
2611 except KeyError:
2611 return None
2612 return None
2612
2613
2613 user = opts.get('user') or old.user()
2614 user = opts.get('user') or old.user()
2614 date = opts.get('date') or old.date()
2615 date = opts.get('date') or old.date()
2615 editform = mergeeditform(old, 'commit.amend')
2616 editform = mergeeditform(old, 'commit.amend')
2616 editor = getcommiteditor(editform=editform, **opts)
2617 editor = getcommiteditor(editform=editform, **opts)
2617 if not message:
2618 if not message:
2618 editor = getcommiteditor(edit=True, editform=editform)
2619 editor = getcommiteditor(edit=True, editform=editform)
2619 message = old.description()
2620 message = old.description()
2620
2621
2621 pureextra = extra.copy()
2622 pureextra = extra.copy()
2622 if 'amend_source' in pureextra:
2623 if 'amend_source' in pureextra:
2623 del pureextra['amend_source']
2624 del pureextra['amend_source']
2624 pureoldextra = old.extra()
2625 pureoldextra = old.extra()
2625 if 'amend_source' in pureoldextra:
2626 if 'amend_source' in pureoldextra:
2626 del pureoldextra['amend_source']
2627 del pureoldextra['amend_source']
2627 extra['amend_source'] = old.hex()
2628 extra['amend_source'] = old.hex()
2628
2629
2629 new = context.memctx(repo,
2630 new = context.memctx(repo,
2630 parents=[base.node(), old.p2().node()],
2631 parents=[base.node(), old.p2().node()],
2631 text=message,
2632 text=message,
2632 files=files,
2633 files=files,
2633 filectxfn=filectxfn,
2634 filectxfn=filectxfn,
2634 user=user,
2635 user=user,
2635 date=date,
2636 date=date,
2636 extra=extra,
2637 extra=extra,
2637 editor=editor)
2638 editor=editor)
2638
2639
2639 newdesc = changelog.stripdesc(new.description())
2640 newdesc = changelog.stripdesc(new.description())
2640 if ((not node)
2641 if ((not node)
2641 and newdesc == old.description()
2642 and newdesc == old.description()
2642 and user == old.user()
2643 and user == old.user()
2643 and date == old.date()
2644 and date == old.date()
2644 and pureextra == pureoldextra):
2645 and pureextra == pureoldextra):
2645 # nothing changed. continuing here would create a new node
2646 # nothing changed. continuing here would create a new node
2646 # anyway because of the amend_source noise.
2647 # anyway because of the amend_source noise.
2647 #
2648 #
2648 # This not what we expect from amend.
2649 # This not what we expect from amend.
2649 return old.node()
2650 return old.node()
2650
2651
2651 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2652 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2652 try:
2653 try:
2653 if opts.get('secret'):
2654 if opts.get('secret'):
2654 commitphase = 'secret'
2655 commitphase = 'secret'
2655 else:
2656 else:
2656 commitphase = old.phase()
2657 commitphase = old.phase()
2657 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2658 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2658 newid = repo.commitctx(new)
2659 newid = repo.commitctx(new)
2659 finally:
2660 finally:
2660 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2661 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2661 if newid != old.node():
2662 if newid != old.node():
2662 # Reroute the working copy parent to the new changeset
2663 # Reroute the working copy parent to the new changeset
2663 repo.setparents(newid, nullid)
2664 repo.setparents(newid, nullid)
2664
2665
2665 # Move bookmarks from old parent to amend commit
2666 # Move bookmarks from old parent to amend commit
2666 bms = repo.nodebookmarks(old.node())
2667 bms = repo.nodebookmarks(old.node())
2667 if bms:
2668 if bms:
2668 marks = repo._bookmarks
2669 marks = repo._bookmarks
2669 for bm in bms:
2670 for bm in bms:
2670 ui.debug('moving bookmarks %r from %s to %s\n' %
2671 ui.debug('moving bookmarks %r from %s to %s\n' %
2671 (marks, old.hex(), hex(newid)))
2672 (marks, old.hex(), hex(newid)))
2672 marks[bm] = newid
2673 marks[bm] = newid
2673 marks.recordchange(tr)
2674 marks.recordchange(tr)
2674 # commit the whole amend process
2675 # commit the whole amend process
2675 if createmarkers:
2676 if createmarkers:
2676 # mark the new changeset as successor of the rewritten one
2677 # mark the new changeset as successor of the rewritten one
2677 new = repo[newid]
2678 new = repo[newid]
2678 obs = [(old, (new,))]
2679 obs = [(old, (new,))]
2679 if node:
2680 if node:
2680 obs.append((ctx, ()))
2681 obs.append((ctx, ()))
2681
2682
2682 obsolete.createmarkers(repo, obs)
2683 obsolete.createmarkers(repo, obs)
2683 tr.close()
2684 tr.close()
2684 finally:
2685 finally:
2685 tr.release()
2686 tr.release()
2686 if not createmarkers and newid != old.node():
2687 if not createmarkers and newid != old.node():
2687 # Strip the intermediate commit (if there was one) and the amended
2688 # Strip the intermediate commit (if there was one) and the amended
2688 # commit
2689 # commit
2689 if node:
2690 if node:
2690 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2691 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2691 ui.note(_('stripping amended changeset %s\n') % old)
2692 ui.note(_('stripping amended changeset %s\n') % old)
2692 repair.strip(ui, repo, old.node(), topic='amend-backup')
2693 repair.strip(ui, repo, old.node(), topic='amend-backup')
2693 finally:
2694 finally:
2694 lockmod.release(lock, wlock)
2695 lockmod.release(lock, wlock)
2695 return newid
2696 return newid
2696
2697
2697 def commiteditor(repo, ctx, subs, editform=''):
2698 def commiteditor(repo, ctx, subs, editform=''):
2698 if ctx.description():
2699 if ctx.description():
2699 return ctx.description()
2700 return ctx.description()
2700 return commitforceeditor(repo, ctx, subs, editform=editform,
2701 return commitforceeditor(repo, ctx, subs, editform=editform,
2701 unchangedmessagedetection=True)
2702 unchangedmessagedetection=True)
2702
2703
2703 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2704 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2704 editform='', unchangedmessagedetection=False):
2705 editform='', unchangedmessagedetection=False):
2705 if not extramsg:
2706 if not extramsg:
2706 extramsg = _("Leave message empty to abort commit.")
2707 extramsg = _("Leave message empty to abort commit.")
2707
2708
2708 forms = [e for e in editform.split('.') if e]
2709 forms = [e for e in editform.split('.') if e]
2709 forms.insert(0, 'changeset')
2710 forms.insert(0, 'changeset')
2710 templatetext = None
2711 templatetext = None
2711 while forms:
2712 while forms:
2712 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2713 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2713 if tmpl:
2714 if tmpl:
2714 templatetext = committext = buildcommittemplate(
2715 templatetext = committext = buildcommittemplate(
2715 repo, ctx, subs, extramsg, tmpl)
2716 repo, ctx, subs, extramsg, tmpl)
2716 break
2717 break
2717 forms.pop()
2718 forms.pop()
2718 else:
2719 else:
2719 committext = buildcommittext(repo, ctx, subs, extramsg)
2720 committext = buildcommittext(repo, ctx, subs, extramsg)
2720
2721
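# A minimal sketch of the configuration consumed by the lookup above: for
# editform 'commit.amend' the keys tried are 'changeset.commit.amend', then
# 'changeset.commit', then 'changeset'. The template bodies below are
# illustrative examples, not shipped defaults:
#
#   [committemplate]
#   changeset.commit.amend = {desc}\n\nHG: {extramsg}
#   changeset = {desc}\n\nHG: {extramsg}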
2721 # run editor in the repository root
2722 # run editor in the repository root
2722 olddir = os.getcwd()
2723 olddir = os.getcwd()
2723 os.chdir(repo.root)
2724 os.chdir(repo.root)
2724
2725
2725 # make in-memory changes visible to external process
2726 # make in-memory changes visible to external process
2726 tr = repo.currenttransaction()
2727 tr = repo.currenttransaction()
2727 repo.dirstate.write(tr)
2728 repo.dirstate.write(tr)
2728 pending = tr and tr.writepending() and repo.root
2729 pending = tr and tr.writepending() and repo.root
2729
2730
2730 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2731 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2731 editform=editform, pending=pending)
2732 editform=editform, pending=pending)
2732 text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
2733 text = re.sub("(?m)^HG:.*(\n|$)", "", editortext)
2733 os.chdir(olddir)
2734 os.chdir(olddir)
2734
2735
2735 if finishdesc:
2736 if finishdesc:
2736 text = finishdesc(text)
2737 text = finishdesc(text)
2737 if not text.strip():
2738 if not text.strip():
2738 raise error.Abort(_("empty commit message"))
2739 raise error.Abort(_("empty commit message"))
2739 if unchangedmessagedetection and editortext == templatetext:
2740 if unchangedmessagedetection and editortext == templatetext:
2740 raise error.Abort(_("commit message unchanged"))
2741 raise error.Abort(_("commit message unchanged"))
2741
2742
2742 return text
2743 return text
2743
2744
2744 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2745 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2745 ui = repo.ui
2746 ui = repo.ui
2746 tmpl, mapfile = gettemplate(ui, tmpl, None)
2747 tmpl, mapfile = gettemplate(ui, tmpl, None)
2747
2748
2748 try:
2749 try:
2749 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2750 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2750 except SyntaxError as inst:
2751 except SyntaxError as inst:
2751 raise error.Abort(inst.args[0])
2752 raise error.Abort(inst.args[0])
2752
2753
2753 for k, v in repo.ui.configitems('committemplate'):
2754 for k, v in repo.ui.configitems('committemplate'):
2754 if k != 'changeset':
2755 if k != 'changeset':
2755 t.t.cache[k] = v
2756 t.t.cache[k] = v
2756
2757
2757 if not extramsg:
2758 if not extramsg:
2758 extramsg = '' # ensure that extramsg is a string
2759 extramsg = '' # ensure that extramsg is a string
2759
2760
2760 ui.pushbuffer()
2761 ui.pushbuffer()
2761 t.show(ctx, extramsg=extramsg)
2762 t.show(ctx, extramsg=extramsg)
2762 return ui.popbuffer()
2763 return ui.popbuffer()
2763
2764
2764 def hgprefix(msg):
2765 def hgprefix(msg):
2765 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2766 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2766
2767
2767 def buildcommittext(repo, ctx, subs, extramsg):
2768 def buildcommittext(repo, ctx, subs, extramsg):
2768 edittext = []
2769 edittext = []
2769 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2770 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2770 if ctx.description():
2771 if ctx.description():
2771 edittext.append(ctx.description())
2772 edittext.append(ctx.description())
2772 edittext.append("")
2773 edittext.append("")
2773 edittext.append("") # Empty line between message and comments.
2774 edittext.append("") # Empty line between message and comments.
2774 edittext.append(hgprefix(_("Enter commit message."
2775 edittext.append(hgprefix(_("Enter commit message."
2775 " Lines beginning with 'HG:' are removed.")))
2776 " Lines beginning with 'HG:' are removed.")))
2776 edittext.append(hgprefix(extramsg))
2777 edittext.append(hgprefix(extramsg))
2777 edittext.append("HG: --")
2778 edittext.append("HG: --")
2778 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2779 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2779 if ctx.p2():
2780 if ctx.p2():
2780 edittext.append(hgprefix(_("branch merge")))
2781 edittext.append(hgprefix(_("branch merge")))
2781 if ctx.branch():
2782 if ctx.branch():
2782 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2783 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2783 if bookmarks.isactivewdirparent(repo):
2784 if bookmarks.isactivewdirparent(repo):
2784 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2785 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2785 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2786 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2786 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2787 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2787 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2788 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2788 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2789 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2789 if not added and not modified and not removed:
2790 if not added and not modified and not removed:
2790 edittext.append(hgprefix(_("no files changed")))
2791 edittext.append(hgprefix(_("no files changed")))
2791 edittext.append("")
2792 edittext.append("")
2792
2793
2793 return "\n".join(edittext)
2794 return "\n".join(edittext)
2794
2795
2795 def commitstatus(repo, node, branch, bheads=None, opts=None):
2796 def commitstatus(repo, node, branch, bheads=None, opts=None):
2796 if opts is None:
2797 if opts is None:
2797 opts = {}
2798 opts = {}
2798 ctx = repo[node]
2799 ctx = repo[node]
2799 parents = ctx.parents()
2800 parents = ctx.parents()
2800
2801
2801 if (not opts.get('amend') and bheads and node not in bheads and not
2802 if (not opts.get('amend') and bheads and node not in bheads and not
2802 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2803 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2803 repo.ui.status(_('created new head\n'))
2804 repo.ui.status(_('created new head\n'))
2804 # The message is not printed for initial roots. For the other
2805 # The message is not printed for initial roots. For the other
2805 # changesets, it is printed in the following situations:
2806 # changesets, it is printed in the following situations:
2806 #
2807 #
2807 # Par column: for the 2 parents with ...
2808 # Par column: for the 2 parents with ...
2808 # N: null or no parent
2809 # N: null or no parent
2809 # B: parent is on another named branch
2810 # B: parent is on another named branch
2810 # C: parent is a regular non head changeset
2811 # C: parent is a regular non head changeset
2811 # H: parent was a branch head of the current branch
2812 # H: parent was a branch head of the current branch
2812 # Msg column: whether we print "created new head" message
2813 # Msg column: whether we print "created new head" message
2813 # In the following, it is assumed that there already exist some
2814 # In the following, it is assumed that there already exist some
2814 # initial branch heads of the current branch, otherwise nothing is
2815 # initial branch heads of the current branch, otherwise nothing is
2815 # printed anyway.
2816 # printed anyway.
2816 #
2817 #
2817 # Par Msg Comment
2818 # Par Msg Comment
2818 # N N y additional topo root
2819 # N N y additional topo root
2819 #
2820 #
2820 # B N y additional branch root
2821 # B N y additional branch root
2821 # C N y additional topo head
2822 # C N y additional topo head
2822 # H N n usual case
2823 # H N n usual case
2823 #
2824 #
2824 # B B y weird additional branch root
2825 # B B y weird additional branch root
2825 # C B y branch merge
2826 # C B y branch merge
2826 # H B n merge with named branch
2827 # H B n merge with named branch
2827 #
2828 #
2828 # C C y additional head from merge
2829 # C C y additional head from merge
2829 # C H n merge with a head
2830 # C H n merge with a head
2830 #
2831 #
2831 # H H n head merge: head count decreases
2832 # H H n head merge: head count decreases
2832
2833
2833 if not opts.get('close_branch'):
2834 if not opts.get('close_branch'):
2834 for r in parents:
2835 for r in parents:
2835 if r.closesbranch() and r.branch() == branch:
2836 if r.closesbranch() and r.branch() == branch:
2836 repo.ui.status(_('reopening closed branch head %d\n') % r)
2837 repo.ui.status(_('reopening closed branch head %d\n') % r)
2837
2838
2838 if repo.ui.debugflag:
2839 if repo.ui.debugflag:
2839 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2840 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2840 elif repo.ui.verbose:
2841 elif repo.ui.verbose:
2841 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2842 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2842
2843
2843 def revert(ui, repo, ctx, parents, *pats, **opts):
2844 def revert(ui, repo, ctx, parents, *pats, **opts):
2844 parent, p2 = parents
2845 parent, p2 = parents
2845 node = ctx.node()
2846 node = ctx.node()
2846
2847
2847 mf = ctx.manifest()
2848 mf = ctx.manifest()
2848 if node == p2:
2849 if node == p2:
2849 parent = p2
2850 parent = p2
2850 if node == parent:
2851 if node == parent:
2851 pmf = mf
2852 pmf = mf
2852 else:
2853 else:
2853 pmf = None
2854 pmf = None
2854
2855
2855 # need all matching names in dirstate and manifest of target rev,
2856 # need all matching names in dirstate and manifest of target rev,
2856 # so have to walk both. do not print errors if files exist in one
2857 # so have to walk both. do not print errors if files exist in one
2857 # but not the other. in both cases, filesets should be evaluated against
2858 # but not the other. in both cases, filesets should be evaluated against
2858 # workingctx to get consistent result (issue4497). this means 'set:**'
2859 # workingctx to get consistent result (issue4497). this means 'set:**'
2859 # cannot be used to select missing files from target rev.
2860 # cannot be used to select missing files from target rev.
2860
2861
2861 # `names` is a mapping for all elements in working copy and target revision
2862 # `names` is a mapping for all elements in working copy and target revision
2862 # The mapping is in the form:
2863 # The mapping is in the form:
2863 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2864 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2864 names = {}
2865 names = {}
2865
2866
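# For illustration (paths are hypothetical): with the working directory in a
# subdirectory 'src' of the repository, a matched file could appear as
#   names['src/module.py'] = ('module.py', True)
# i.e. a repo-relative key mapped to (cwd-relative path, exactly-matched flag).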
2866 wlock = repo.wlock()
2867 wlock = repo.wlock()
2867 try:
2868 try:
2868 ## filling of the `names` mapping
2869 ## filling of the `names` mapping
2869 # walk dirstate to fill `names`
2870 # walk dirstate to fill `names`
2870
2871
2871 interactive = opts.get('interactive', False)
2872 interactive = opts.get('interactive', False)
2872 wctx = repo[None]
2873 wctx = repo[None]
2873 m = scmutil.match(wctx, pats, opts)
2874 m = scmutil.match(wctx, pats, opts)
2874
2875
2875 # we'll need this later
2876 # we'll need this later
2876 targetsubs = sorted(s for s in wctx.substate if m(s))
2877 targetsubs = sorted(s for s in wctx.substate if m(s))
2877
2878
2878 if not m.always():
2879 if not m.always():
2879 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2880 for abs in repo.walk(matchmod.badmatch(m, lambda x, y: False)):
2880 names[abs] = m.rel(abs), m.exact(abs)
2881 names[abs] = m.rel(abs), m.exact(abs)
2881
2882
2882 # walk target manifest to fill `names`
2883 # walk target manifest to fill `names`
2883
2884
2884 def badfn(path, msg):
2885 def badfn(path, msg):
2885 if path in names:
2886 if path in names:
2886 return
2887 return
2887 if path in ctx.substate:
2888 if path in ctx.substate:
2888 return
2889 return
2889 path_ = path + '/'
2890 path_ = path + '/'
2890 for f in names:
2891 for f in names:
2891 if f.startswith(path_):
2892 if f.startswith(path_):
2892 return
2893 return
2893 ui.warn("%s: %s\n" % (m.rel(path), msg))
2894 ui.warn("%s: %s\n" % (m.rel(path), msg))
2894
2895
2895 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2896 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2896 if abs not in names:
2897 if abs not in names:
2897 names[abs] = m.rel(abs), m.exact(abs)
2898 names[abs] = m.rel(abs), m.exact(abs)
2898
2899
2899 # Find the status of all files in `names`.
2900 # Find the status of all files in `names`.
2900 m = scmutil.matchfiles(repo, names)
2901 m = scmutil.matchfiles(repo, names)
2901
2902
2902 changes = repo.status(node1=node, match=m,
2903 changes = repo.status(node1=node, match=m,
2903 unknown=True, ignored=True, clean=True)
2904 unknown=True, ignored=True, clean=True)
2904 else:
2905 else:
2905 changes = repo.status(node1=node, match=m)
2906 changes = repo.status(node1=node, match=m)
2906 for kind in changes:
2907 for kind in changes:
2907 for abs in kind:
2908 for abs in kind:
2908 names[abs] = m.rel(abs), m.exact(abs)
2909 names[abs] = m.rel(abs), m.exact(abs)
2909
2910
2910 m = scmutil.matchfiles(repo, names)
2911 m = scmutil.matchfiles(repo, names)
2911
2912
2912 modified = set(changes.modified)
2913 modified = set(changes.modified)
2913 added = set(changes.added)
2914 added = set(changes.added)
2914 removed = set(changes.removed)
2915 removed = set(changes.removed)
2915 _deleted = set(changes.deleted)
2916 _deleted = set(changes.deleted)
2916 unknown = set(changes.unknown)
2917 unknown = set(changes.unknown)
2917 unknown.update(changes.ignored)
2918 unknown.update(changes.ignored)
2918 clean = set(changes.clean)
2919 clean = set(changes.clean)
2919 modadded = set()
2920 modadded = set()
2920
2921
2921 # split between files known in target manifest and the others
2922 # split between files known in target manifest and the others
2922 smf = set(mf)
2923 smf = set(mf)
2923
2924
2924 # determine the exact nature of the deleted files
2925 # determine the exact nature of the deleted files
2925 deladded = _deleted - smf
2926 deladded = _deleted - smf
2926 deleted = _deleted - deladded
2927 deleted = _deleted - deladded
2927
2928
2928 # We need to account for the state of the file in the dirstate,
2929 # We need to account for the state of the file in the dirstate,
2929 # even when we revert against something other than the parent. This will
2930 # even when we revert against something other than the parent. This will
2930 # slightly alter the behavior of revert (backing up or not, deleting
2931 # slightly alter the behavior of revert (backing up or not, deleting
2931 # or just forgetting, etc.).
2932 # or just forgetting, etc.).
2932 if parent == node:
2933 if parent == node:
2933 dsmodified = modified
2934 dsmodified = modified
2934 dsadded = added
2935 dsadded = added
2935 dsremoved = removed
2936 dsremoved = removed
2936 # store all local modifications, useful later for rename detection
2937 # store all local modifications, useful later for rename detection
2937 localchanges = dsmodified | dsadded
2938 localchanges = dsmodified | dsadded
2938 modified, added, removed = set(), set(), set()
2939 modified, added, removed = set(), set(), set()
2939 else:
2940 else:
2940 changes = repo.status(node1=parent, match=m)
2941 changes = repo.status(node1=parent, match=m)
2941 dsmodified = set(changes.modified)
2942 dsmodified = set(changes.modified)
2942 dsadded = set(changes.added)
2943 dsadded = set(changes.added)
2943 dsremoved = set(changes.removed)
2944 dsremoved = set(changes.removed)
2944 # store all local modifications, useful later for rename detection
2945 # store all local modifications, useful later for rename detection
2945 localchanges = dsmodified | dsadded
2946 localchanges = dsmodified | dsadded
2946
2947
2947 # only take into account removes between wc and target
2948 # only take into account removes between wc and target
2948 clean |= dsremoved - removed
2949 clean |= dsremoved - removed
2949 dsremoved &= removed
2950 dsremoved &= removed
2950 # distinguish between dirstate removes and the others
2951 # distinguish between dirstate removes and the others
2951 removed -= dsremoved
2952 removed -= dsremoved
2952
2953
2953 modadded = added & dsmodified
2954 modadded = added & dsmodified
2954 added -= modadded
2955 added -= modadded
2955
2956
2956 # tell newly modified files apart.
2957 # tell newly modified files apart.
2957 dsmodified &= modified
2958 dsmodified &= modified
2958 dsmodified |= modified & dsadded # dirstate added may need backup
2959 dsmodified |= modified & dsadded # dirstate added may need backup
2959 modified -= dsmodified
2960 modified -= dsmodified
2960
2961
2961 # We need to wait for some post-processing to update this set
2962 # We need to wait for some post-processing to update this set
2962 # before making the distinction. The dirstate will be used for
2963 # before making the distinction. The dirstate will be used for
2963 # that purpose.
2964 # that purpose.
2964 dsadded = added
2965 dsadded = added
2965
2966
2966 # in case of merge, files that are actually added can be reported as
2967 # in case of merge, files that are actually added can be reported as
2967 # modified, we need to post process the result
2968 # modified, we need to post process the result
2968 if p2 != nullid:
2969 if p2 != nullid:
2969 if pmf is None:
2970 if pmf is None:
2970 # only need parent manifest in the merge case,
2971 # only need parent manifest in the merge case,
2971 # so do not read by default
2972 # so do not read by default
2972 pmf = repo[parent].manifest()
2973 pmf = repo[parent].manifest()
2973 mergeadd = dsmodified - set(pmf)
2974 mergeadd = dsmodified - set(pmf)
2974 dsadded |= mergeadd
2975 dsadded |= mergeadd
2975 dsmodified -= mergeadd
2976 dsmodified -= mergeadd
2976
2977
2977 # if f is a rename, update `names` to also revert the source
2978 # if f is a rename, update `names` to also revert the source
2978 cwd = repo.getcwd()
2979 cwd = repo.getcwd()
2979 for f in localchanges:
2980 for f in localchanges:
2980 src = repo.dirstate.copied(f)
2981 src = repo.dirstate.copied(f)
2981 # XXX should we check for rename down to target node?
2982 # XXX should we check for rename down to target node?
2982 if src and src not in names and repo.dirstate[src] == 'r':
2983 if src and src not in names and repo.dirstate[src] == 'r':
2983 dsremoved.add(src)
2984 dsremoved.add(src)
2984 names[src] = (repo.pathto(src, cwd), True)
2985 names[src] = (repo.pathto(src, cwd), True)
2985
2986
2986 # distinguish between file to forget and the other
2987 # distinguish between file to forget and the other
2987 added = set()
2988 added = set()
2988 for abs in dsadded:
2989 for abs in dsadded:
2989 if repo.dirstate[abs] != 'a':
2990 if repo.dirstate[abs] != 'a':
2990 added.add(abs)
2991 added.add(abs)
2991 dsadded -= added
2992 dsadded -= added
2992
2993
2993 for abs in deladded:
2994 for abs in deladded:
2994 if repo.dirstate[abs] == 'a':
2995 if repo.dirstate[abs] == 'a':
2995 dsadded.add(abs)
2996 dsadded.add(abs)
2996 deladded -= dsadded
2997 deladded -= dsadded
2997
2998
2998 # For files marked as removed, we check if an unknown file is present at
2999 # For files marked as removed, we check if an unknown file is present at
2999 # the same path. If such a file exists it may need to be backed up.
3000 # the same path. If such a file exists it may need to be backed up.
3000 # Making the distinction at this stage helps keep the backup logic
3001 # Making the distinction at this stage helps keep the backup logic
3001 # simpler.
3002 # simpler.
3002 removunk = set()
3003 removunk = set()
3003 for abs in removed:
3004 for abs in removed:
3004 target = repo.wjoin(abs)
3005 target = repo.wjoin(abs)
3005 if os.path.lexists(target):
3006 if os.path.lexists(target):
3006 removunk.add(abs)
3007 removunk.add(abs)
3007 removed -= removunk
3008 removed -= removunk
3008
3009
3009 dsremovunk = set()
3010 dsremovunk = set()
3010 for abs in dsremoved:
3011 for abs in dsremoved:
3011 target = repo.wjoin(abs)
3012 target = repo.wjoin(abs)
3012 if os.path.lexists(target):
3013 if os.path.lexists(target):
3013 dsremovunk.add(abs)
3014 dsremovunk.add(abs)
3014 dsremoved -= dsremovunk
3015 dsremoved -= dsremovunk
3015
3016
3016 # action to be actually performed by revert
3017 # action to be actually performed by revert
3017 # (<list of files>, <message>) tuple
3018 # (<list of files>, <message>) tuple
3018 actions = {'revert': ([], _('reverting %s\n')),
3019 actions = {'revert': ([], _('reverting %s\n')),
3019 'add': ([], _('adding %s\n')),
3020 'add': ([], _('adding %s\n')),
3020 'remove': ([], _('removing %s\n')),
3021 'remove': ([], _('removing %s\n')),
3021 'drop': ([], _('removing %s\n')),
3022 'drop': ([], _('removing %s\n')),
3022 'forget': ([], _('forgetting %s\n')),
3023 'forget': ([], _('forgetting %s\n')),
3023 'undelete': ([], _('undeleting %s\n')),
3024 'undelete': ([], _('undeleting %s\n')),
3024 'noop': (None, _('no changes needed to %s\n')),
3025 'noop': (None, _('no changes needed to %s\n')),
3025 'unknown': (None, _('file not managed: %s\n')),
3026 'unknown': (None, _('file not managed: %s\n')),
3026 }
3027 }
3027
3028
3028 # "constants" that convey the backup strategy.
3029 # "constants" that convey the backup strategy.
3029 # All set to `discard` if `no-backup` is set, to avoid checking
3030 # All set to `discard` if `no-backup` is set, to avoid checking
3030 # no_backup lower in the code.
3031 # no_backup lower in the code.
3031 # These values are ordered for comparison purposes
3032 # These values are ordered for comparison purposes
3032 backup = 2 # unconditionally do backup
3033 backup = 2 # unconditionally do backup
3033 check = 1 # check if the existing file differs from target
3034 check = 1 # check if the existing file differs from target
3034 discard = 0 # never do backup
3035 discard = 0 # never do backup
3035 if opts.get('no_backup'):
3036 if opts.get('no_backup'):
3036 backup = check = discard
3037 backup = check = discard
3037
3038
3038 backupanddel = actions['remove']
3039 backupanddel = actions['remove']
3039 if not opts.get('no_backup'):
3040 if not opts.get('no_backup'):
3040 backupanddel = actions['drop']
3041 backupanddel = actions['drop']
3041
3042
3042 disptable = (
3043 disptable = (
3043 # dispatch table:
3044 # dispatch table:
3044 # file state
3045 # file state
3045 # action
3046 # action
3046 # make backup
3047 # make backup
3047
3048
3048 ## Sets that will result in changes to files on disk
3049 ## Sets that will result in changes to files on disk
3049 # Modified compared to target, no local change
3050 # Modified compared to target, no local change
3050 (modified, actions['revert'], discard),
3051 (modified, actions['revert'], discard),
3051 # Modified compared to target, but local file is deleted
3052 # Modified compared to target, but local file is deleted
3052 (deleted, actions['revert'], discard),
3053 (deleted, actions['revert'], discard),
3053 # Modified compared to target, local change
3054 # Modified compared to target, local change
3054 (dsmodified, actions['revert'], backup),
3055 (dsmodified, actions['revert'], backup),
3055 # Added since target
3056 # Added since target
3056 (added, actions['remove'], discard),
3057 (added, actions['remove'], discard),
3057 # Added in working directory
3058 # Added in working directory
3058 (dsadded, actions['forget'], discard),
3059 (dsadded, actions['forget'], discard),
3059 # Added since target, with local modifications
3060 # Added since target, with local modifications
3060 (modadded, backupanddel, backup),
3061 (modadded, backupanddel, backup),
3061 # Added since target but file is missing in working directory
3062 # Added since target but file is missing in working directory
3062 (deladded, actions['drop'], discard),
3063 (deladded, actions['drop'], discard),
3063 # Removed since target, before working copy parent
3064 # Removed since target, before working copy parent
3064 (removed, actions['add'], discard),
3065 (removed, actions['add'], discard),
3065 # Same as `removed` but an unknown file exists at the same path
3066 # Same as `removed` but an unknown file exists at the same path
3066 (removunk, actions['add'], check),
3067 (removunk, actions['add'], check),
3067 # Removed since target, marked as such in working copy parent
3068 # Removed since target, marked as such in working copy parent
3068 (dsremoved, actions['undelete'], discard),
3069 (dsremoved, actions['undelete'], discard),
3069 # Same as `dsremoved` but an unknown file exists at the same path
3070 # Same as `dsremoved` but an unknown file exists at the same path
3070 (dsremovunk, actions['undelete'], check),
3071 (dsremovunk, actions['undelete'], check),
3071 ## the following sets do not result in any file changes
3072 ## the following sets do not result in any file changes
3072 # File with no modification
3073 # File with no modification
3073 (clean, actions['noop'], discard),
3074 (clean, actions['noop'], discard),
3074 # Existing file, not tracked anywhere
3075 # Existing file, not tracked anywhere
3075 (unknown, actions['unknown'], discard),
3076 (unknown, actions['unknown'], discard),
3076 )
3077 )
3077
3078
3078 for abs, (rel, exact) in sorted(names.items()):
3079 for abs, (rel, exact) in sorted(names.items()):
3079 # target file to be touched on disk (relative to cwd)
3080 # target file to be touched on disk (relative to cwd)
3080 target = repo.wjoin(abs)
3081 target = repo.wjoin(abs)
3081 # search the entry in the dispatch table.
3082 # search the entry in the dispatch table.
3082 # if the file is in any of these sets, it was touched in the working
3083 # if the file is in any of these sets, it was touched in the working
3083 # directory parent and we are sure it needs to be reverted.
3084 # directory parent and we are sure it needs to be reverted.
3084 for table, (xlist, msg), dobackup in disptable:
3085 for table, (xlist, msg), dobackup in disptable:
3085 if abs not in table:
3086 if abs not in table:
3086 continue
3087 continue
3087 if xlist is not None:
3088 if xlist is not None:
3088 xlist.append(abs)
3089 xlist.append(abs)
3089 if dobackup and (backup <= dobackup
3090 if dobackup and (backup <= dobackup
3090 or wctx[abs].cmp(ctx[abs])):
3091 or wctx[abs].cmp(ctx[abs])):
3091 bakname = origpath(ui, repo, rel)
3092 bakname = origpath(ui, repo, rel)
3092 ui.note(_('saving current version of %s as %s\n') %
3093 ui.note(_('saving current version of %s as %s\n') %
3093 (rel, bakname))
3094 (rel, bakname))
3094 if not opts.get('dry_run'):
3095 if not opts.get('dry_run'):
3095 if interactive:
3096 if interactive:
3096 util.copyfile(target, bakname)
3097 util.copyfile(target, bakname)
3097 else:
3098 else:
3098 util.rename(target, bakname)
3099 util.rename(target, bakname)
3099 if ui.verbose or not exact:
3100 if ui.verbose or not exact:
3100 if not isinstance(msg, basestring):
3101 if not isinstance(msg, basestring):
3101 msg = msg(abs)
3102 msg = msg(abs)
3102 ui.status(msg % rel)
3103 ui.status(msg % rel)
3103 elif exact:
3104 elif exact:
3104 ui.warn(msg % rel)
3105 ui.warn(msg % rel)
3105 break
3106 break
3106
3107
3107 if not opts.get('dry_run'):
3108 if not opts.get('dry_run'):
3108 needdata = ('revert', 'add', 'undelete')
3109 needdata = ('revert', 'add', 'undelete')
3109 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3110 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3110 _performrevert(repo, parents, ctx, actions, interactive)
3111 _performrevert(repo, parents, ctx, actions, interactive)
3111
3112
3112 if targetsubs:
3113 if targetsubs:
3113 # Revert the subrepos on the revert list
3114 # Revert the subrepos on the revert list
3114 for sub in targetsubs:
3115 for sub in targetsubs:
3115 try:
3116 try:
3116 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3117 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3117 except KeyError:
3118 except KeyError:
3118 raise error.Abort("subrepository '%s' does not exist in %s!"
3119 raise error.Abort("subrepository '%s' does not exist in %s!"
3119 % (sub, short(ctx.node())))
3120 % (sub, short(ctx.node())))
3120 finally:
3121 finally:
3121 wlock.release()
3122 wlock.release()
3122
3123
3123 def origpath(ui, repo, filepath):
3124 def origpath(ui, repo, filepath):
3124 '''customize where .orig files are created
3125 '''customize where .orig files are created
3125
3126
3126 Fetch user defined path from config file: [ui] origbackuppath = <path>
3127 Fetch user defined path from config file: [ui] origbackuppath = <path>
3127 Fall back to default (filepath) if not specified
3128 Fall back to default (filepath) if not specified
3128 '''
3129 '''
3129 origbackuppath = ui.config('ui', 'origbackuppath', None)
3130 origbackuppath = ui.config('ui', 'origbackuppath', None)
3130 if origbackuppath is None:
3131 if origbackuppath is None:
3131 return filepath + ".orig"
3132 return filepath + ".orig"
3132
3133
3133 filepathfromroot = os.path.relpath(filepath, start=repo.root)
3134 filepathfromroot = os.path.relpath(filepath, start=repo.root)
3134 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
3135 fullorigpath = repo.wjoin(origbackuppath, filepathfromroot)
3135
3136
3136 origbackupdir = repo.vfs.dirname(fullorigpath)
3137 origbackupdir = repo.vfs.dirname(fullorigpath)
3137 if not repo.vfs.exists(origbackupdir):
3138 if not repo.vfs.exists(origbackupdir):
3138 ui.note(_('creating directory: %s\n') % origbackupdir)
3139 ui.note(_('creating directory: %s\n') % origbackupdir)
3139 util.makedirs(origbackupdir)
3140 util.makedirs(origbackupdir)
3140
3141
3141 return fullorigpath + ".orig"
3142 return fullorigpath + ".orig"
3142
3143
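# A small configuration sketch for the behaviour implemented above; the
# directory name is an example, not a shipped default:
#
#   [ui]
#   origbackuppath = .hg/origbackups
#
# With this set, backups are written as <origbackuppath>/<repo-relative
# path>.orig under the repository root instead of next to the original file.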
3143 def _revertprefetch(repo, ctx, *files):
3144 def _revertprefetch(repo, ctx, *files):
3144 """Let extensions changing the storage layer prefetch content"""
3145 """Let extensions changing the storage layer prefetch content"""
3145 pass
3146 pass
3146
3147
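# A sketch of how a storage extension might hook the stub above; the wrapper
# name and body are assumptions, only the hook signature comes from this file,
# and extensions.wrapfunction() is the standard wrapping helper:
#
#   from mercurial import extensions
#
#   def prefetchwrapper(orig, repo, ctx, *files):
#       # fetch the file contents revert is about to need from the store here
#       return orig(repo, ctx, *files)
#
#   def extsetup(ui):
#       extensions.wrapfunction(cmdutil, '_revertprefetch', prefetchwrapper)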
3147 def _performrevert(repo, parents, ctx, actions, interactive=False):
3148 def _performrevert(repo, parents, ctx, actions, interactive=False):
3148 """function that actually performs all the actions computed for revert
3149 """function that actually performs all the actions computed for revert
3149
3150
3150 This is an independent function to let extensions plug in and react to
3151 This is an independent function to let extensions plug in and react to
3151 the imminent revert.
3152 the imminent revert.
3152
3153
3153 Make sure you have the working directory locked when calling this function.
3154 Make sure you have the working directory locked when calling this function.
3154 """
3155 """
3155 parent, p2 = parents
3156 parent, p2 = parents
3156 node = ctx.node()
3157 node = ctx.node()
3157 def checkout(f):
3158 def checkout(f):
3158 fc = ctx[f]
3159 fc = ctx[f]
3159 repo.wwrite(f, fc.data(), fc.flags())
3160 repo.wwrite(f, fc.data(), fc.flags())
3160
3161
3161 audit_path = pathutil.pathauditor(repo.root)
3162 audit_path = pathutil.pathauditor(repo.root)
3162 for f in actions['forget'][0]:
3163 for f in actions['forget'][0]:
3163 repo.dirstate.drop(f)
3164 repo.dirstate.drop(f)
3164 for f in actions['remove'][0]:
3165 for f in actions['remove'][0]:
3165 audit_path(f)
3166 audit_path(f)
3166 try:
3167 try:
3167 util.unlinkpath(repo.wjoin(f))
3168 util.unlinkpath(repo.wjoin(f))
3168 except OSError:
3169 except OSError:
3169 pass
3170 pass
3170 repo.dirstate.remove(f)
3171 repo.dirstate.remove(f)
3171 for f in actions['drop'][0]:
3172 for f in actions['drop'][0]:
3172 audit_path(f)
3173 audit_path(f)
3173 repo.dirstate.remove(f)
3174 repo.dirstate.remove(f)
3174
3175
3175 normal = None
3176 normal = None
3176 if node == parent:
3177 if node == parent:
3177 # We're reverting to our parent. If possible, we'd like status
3178 # We're reverting to our parent. If possible, we'd like status
3178 # to report the file as clean. We have to use normallookup for
3179 # to report the file as clean. We have to use normallookup for
3179 # merges to avoid losing information about merged/dirty files.
3180 # merges to avoid losing information about merged/dirty files.
3180 if p2 != nullid:
3181 if p2 != nullid:
3181 normal = repo.dirstate.normallookup
3182 normal = repo.dirstate.normallookup
3182 else:
3183 else:
3183 normal = repo.dirstate.normal
3184 normal = repo.dirstate.normal
3184
3185
3185 newlyaddedandmodifiedfiles = set()
3186 newlyaddedandmodifiedfiles = set()
3186 if interactive:
3187 if interactive:
3187 # Prompt the user for changes to revert
3188 # Prompt the user for changes to revert
3188 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3189 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3189 m = scmutil.match(ctx, torevert, {})
3190 m = scmutil.match(ctx, torevert, {})
3190 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3191 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3191 diffopts.nodates = True
3192 diffopts.nodates = True
3192 diffopts.git = True
3193 diffopts.git = True
3193 reversehunks = repo.ui.configbool('experimental',
3194 reversehunks = repo.ui.configbool('experimental',
3194 'revertalternateinteractivemode',
3195 'revertalternateinteractivemode',
3195 True)
3196 True)
3196 if reversehunks:
3197 if reversehunks:
3197 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3198 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3198 else:
3199 else:
3199 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3200 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3200 originalchunks = patch.parsepatch(diff)
3201 originalchunks = patch.parsepatch(diff)
3201
3202
3202 try:
3203 try:
3203
3204
3204 chunks = recordfilter(repo.ui, originalchunks)
3205 chunks, opts = recordfilter(repo.ui, originalchunks)
3205 if reversehunks:
3206 if reversehunks:
3206 chunks = patch.reversehunks(chunks)
3207 chunks = patch.reversehunks(chunks)
3207
3208
3208 except patch.PatchError as err:
3209 except patch.PatchError as err:
3209 raise error.Abort(_('error parsing patch: %s') % err)
3210 raise error.Abort(_('error parsing patch: %s') % err)
3210
3211
3211 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3212 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3212 # Apply changes
3213 # Apply changes
3213 fp = cStringIO.StringIO()
3214 fp = cStringIO.StringIO()
3214 for c in chunks:
3215 for c in chunks:
3215 c.write(fp)
3216 c.write(fp)
3216 dopatch = fp.tell()
3217 dopatch = fp.tell()
3217 fp.seek(0)
3218 fp.seek(0)
3218 if dopatch:
3219 if dopatch:
3219 try:
3220 try:
3220 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3221 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3221 except patch.PatchError as err:
3222 except patch.PatchError as err:
3222 raise error.Abort(str(err))
3223 raise error.Abort(str(err))
3223 del fp
3224 del fp
3224 else:
3225 else:
3225 for f in actions['revert'][0]:
3226 for f in actions['revert'][0]:
3226 checkout(f)
3227 checkout(f)
3227 if normal:
3228 if normal:
3228 normal(f)
3229 normal(f)
3229
3230
3230 for f in actions['add'][0]:
3231 for f in actions['add'][0]:
3231 # Don't check out modified files; they are already created by the diff
3232 # Don't check out modified files; they are already created by the diff
3232 if f not in newlyaddedandmodifiedfiles:
3233 if f not in newlyaddedandmodifiedfiles:
3233 checkout(f)
3234 checkout(f)
3234 repo.dirstate.add(f)
3235 repo.dirstate.add(f)
3235
3236
3236 normal = repo.dirstate.normallookup
3237 normal = repo.dirstate.normallookup
3237 if node == parent and p2 == nullid:
3238 if node == parent and p2 == nullid:
3238 normal = repo.dirstate.normal
3239 normal = repo.dirstate.normal
3239 for f in actions['undelete'][0]:
3240 for f in actions['undelete'][0]:
3240 checkout(f)
3241 checkout(f)
3241 normal(f)
3242 normal(f)
3242
3243
3243 copied = copies.pathcopies(repo[parent], ctx)
3244 copied = copies.pathcopies(repo[parent], ctx)
3244
3245
3245 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3246 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3246 if f in copied:
3247 if f in copied:
3247 repo.dirstate.copy(copied[f], f)
3248 repo.dirstate.copy(copied[f], f)
3248
3249
3249 def command(table):
3250 def command(table):
3250 """Returns a function object to be used as a decorator for making commands.
3251 """Returns a function object to be used as a decorator for making commands.
3251
3252
3252 This function receives a command table as its argument. The table should
3253 This function receives a command table as its argument. The table should
3253 be a dict.
3254 be a dict.
3254
3255
3255 The returned function can be used as a decorator for adding commands
3256 The returned function can be used as a decorator for adding commands
3256 to that command table. This function accepts multiple arguments to define
3257 to that command table. This function accepts multiple arguments to define
3257 a command.
3258 a command.
3258
3259
3259 The first argument is the command name.
3260 The first argument is the command name.
3260
3261
3261 The options argument is an iterable of tuples defining command arguments.
3262 The options argument is an iterable of tuples defining command arguments.
3262 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3263 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3263
3264
3264 The synopsis argument defines a short, one line summary of how to use the
3265 The synopsis argument defines a short, one line summary of how to use the
3265 command. This shows up in the help output.
3266 command. This shows up in the help output.
3266
3267
3267 The norepo argument defines whether the command can run without a
3268 The norepo argument defines whether the command can run without a
3268 local repository. Most commands operate against a repository, thus the
3269 local repository. Most commands operate against a repository, thus the
3269 default is False.
3270 default is False.
3270
3271
3271 The optionalrepo argument defines whether the command optionally requires
3272 The optionalrepo argument defines whether the command optionally requires
3272 a local repository.
3273 a local repository.
3273
3274
3274 The inferrepo argument defines whether to try to find a repository from the
3275 The inferrepo argument defines whether to try to find a repository from the
3275 command line arguments. If True, arguments will be examined for potential
3276 command line arguments. If True, arguments will be examined for potential
3276 repository locations. See ``findrepo()``. If a repository is found, it
3277 repository locations. See ``findrepo()``. If a repository is found, it
3277 will be used.
3278 will be used.
3278 """
3279 """
3279 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3280 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3280 inferrepo=False):
3281 inferrepo=False):
3281 def decorator(func):
3282 def decorator(func):
3282 if synopsis:
3283 if synopsis:
3283 table[name] = func, list(options), synopsis
3284 table[name] = func, list(options), synopsis
3284 else:
3285 else:
3285 table[name] = func, list(options)
3286 table[name] = func, list(options)
3286
3287
3287 if norepo:
3288 if norepo:
3288 # Avoid import cycle.
3289 # Avoid import cycle.
3289 import commands
3290 import commands
3290 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3291 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3291
3292
3292 if optionalrepo:
3293 if optionalrepo:
3293 import commands
3294 import commands
3294 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3295 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3295
3296
3296 if inferrepo:
3297 if inferrepo:
3297 import commands
3298 import commands
3298 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3299 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3299
3300
3300 return func
3301 return func
3301 return decorator
3302 return decorator
3302
3303
3303 return cmd
3304 return cmd
3304
3305
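# A minimal sketch of how the factory above is typically used by an
# extension; 'mycmd', the option tuple and the body are illustrative:
#
#   cmdtable = {}
#   command = cmdutil.command(cmdtable)
#
#   @command('mycmd', [('', 'flag', None, _('an example flag'))],
#            _('hg mycmd [--flag]'))
#   def mycmd(ui, repo, **opts):
#       ui.write('flag is %r\n' % opts.get('flag'))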
3305 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3306 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3306 # commands.outgoing. "missing" is the "missing" attribute of the result of
3307 # commands.outgoing. "missing" is the "missing" attribute of the result of
3307 # "findcommonoutgoing()"
3308 # "findcommonoutgoing()"
3308 outgoinghooks = util.hooks()
3309 outgoinghooks = util.hooks()
3309
3310
3310 # a list of (ui, repo) functions called by commands.summary
3311 # a list of (ui, repo) functions called by commands.summary
3311 summaryhooks = util.hooks()
3312 summaryhooks = util.hooks()
3312
3313
3313 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3314 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3314 #
3315 #
3315 # functions should return a tuple of the booleans below, if 'changes' is None:
3316 # functions should return a tuple of the booleans below, if 'changes' is None:
3316 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3317 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3317 #
3318 #
3318 # otherwise, 'changes' is a tuple of tuples below:
3319 # otherwise, 'changes' is a tuple of tuples below:
3319 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3320 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3320 # - (desturl, destbranch, destpeer, outgoing)
3321 # - (desturl, destbranch, destpeer, outgoing)
3321 summaryremotehooks = util.hooks()
3322 summaryremotehooks = util.hooks()
3322
3323
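# A sketch of how an extension can register with the hook points above;
# 'myext' and the hook body are assumptions, the add() call is the real
# registration interface of util.hooks():
#
#   def mysummaryhook(ui, repo):
#       ui.status(_('myext: extra summary information\n'))
#
#   def extsetup(ui):
#       cmdutil.summaryhooks.add('myext', mysummaryhook)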
3323 # A list of state files kept by multistep operations like graft.
3324 # A list of state files kept by multistep operations like graft.
3324 # Since graft cannot be aborted, it is considered 'clearable' by update.
3325 # Since graft cannot be aborted, it is considered 'clearable' by update.
3325 # note: bisect is intentionally excluded
3326 # note: bisect is intentionally excluded
3326 # (state file, clearable, allowcommit, error, hint)
3327 # (state file, clearable, allowcommit, error, hint)
3327 unfinishedstates = [
3328 unfinishedstates = [
3328 ('graftstate', True, False, _('graft in progress'),
3329 ('graftstate', True, False, _('graft in progress'),
3329 _("use 'hg graft --continue' or 'hg update' to abort")),
3330 _("use 'hg graft --continue' or 'hg update' to abort")),
3330 ('updatestate', True, False, _('last update was interrupted'),
3331 ('updatestate', True, False, _('last update was interrupted'),
3331 _("use 'hg update' to get a consistent checkout"))
3332 _("use 'hg update' to get a consistent checkout"))
3332 ]
3333 ]
3333
3334
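# A sketch of how an extension would register its own state file in the list
# above (the state file name and the messages are illustrative):
#
#   cmdutil.unfinishedstates.append(
#       ('myextstate', False, False, _('myext operation in progress'),
#        _("use 'hg myext --continue' or 'hg myext --abort'")))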
3334 def checkunfinished(repo, commit=False):
3335 def checkunfinished(repo, commit=False):
3335 '''Look for an unfinished multistep operation, like graft, and abort
3336 '''Look for an unfinished multistep operation, like graft, and abort
3336 if found. It's probably good to check this right before
3337 if found. It's probably good to check this right before
3337 bailifchanged().
3338 bailifchanged().
3338 '''
3339 '''
3339 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3340 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3340 if commit and allowcommit:
3341 if commit and allowcommit:
3341 continue
3342 continue
3342 if repo.vfs.exists(f):
3343 if repo.vfs.exists(f):
3343 raise error.Abort(msg, hint=hint)
3344 raise error.Abort(msg, hint=hint)
3344
3345
3345 def clearunfinished(repo):
3346 def clearunfinished(repo):
3346 '''Check for unfinished operations (as above), and clear the ones
3347 '''Check for unfinished operations (as above), and clear the ones
3347 that are clearable.
3348 that are clearable.
3348 '''
3349 '''
3349 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3350 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3350 if not clearable and repo.vfs.exists(f):
3351 if not clearable and repo.vfs.exists(f):
3351 raise error.Abort(msg, hint=hint)
3352 raise error.Abort(msg, hint=hint)
3352 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3353 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3353 if clearable and repo.vfs.exists(f):
3354 if clearable and repo.vfs.exists(f):
3354 util.unlink(repo.join(f))
3355 util.unlink(repo.join(f))
3355
3356
3356 class dirstateguard(object):
3357 class dirstateguard(object):
3357 '''Restore dirstate at unexpected failure.
3358 '''Restore dirstate at unexpected failure.
3358
3359
3359 At construction time, this class does:
3360 At construction time, this class does:
3360
3361
3361 - write current ``repo.dirstate`` out, and
3362 - write current ``repo.dirstate`` out, and
3362 - save ``.hg/dirstate`` into the backup file
3363 - save ``.hg/dirstate`` into the backup file
3363
3364
3364 This restores ``.hg/dirstate`` from the backup file, if ``release()``
3365 This restores ``.hg/dirstate`` from the backup file, if ``release()``
3365 is invoked before ``close()``.
3366 is invoked before ``close()``.
3366
3367
3367 If ``close()`` is invoked before ``release()``, only the backup file is removed.
3368 If ``close()`` is invoked before ``release()``, only the backup file is removed.
3368 '''
3369 '''
3369
3370
3370 def __init__(self, repo, name):
3371 def __init__(self, repo, name):
3371 self._repo = repo
3372 self._repo = repo
3372 self._suffix = '.backup.%s.%d' % (name, id(self))
3373 self._suffix = '.backup.%s.%d' % (name, id(self))
3373 repo.dirstate._savebackup(repo.currenttransaction(), self._suffix)
3374 repo.dirstate._savebackup(repo.currenttransaction(), self._suffix)
3374 self._active = True
3375 self._active = True
3375 self._closed = False
3376 self._closed = False
3376
3377
3377 def __del__(self):
3378 def __del__(self):
3378 if self._active: # still active
3379 if self._active: # still active
3379 # this may occur, even if this class is used correctly:
3380 # this may occur, even if this class is used correctly:
3380 # for example, releasing other resources like a transaction
3381 # for example, releasing other resources like a transaction
3381 # may raise an exception before ``dirstateguard.release`` in
3382 # may raise an exception before ``dirstateguard.release`` in
3382 # ``release(tr, ....)``.
3383 # ``release(tr, ....)``.
3383 self._abort()
3384 self._abort()
3384
3385
3385 def close(self):
3386 def close(self):
3386 if not self._active: # already inactivated
3387 if not self._active: # already inactivated
3387 msg = (_("can't close already inactivated backup: dirstate%s")
3388 msg = (_("can't close already inactivated backup: dirstate%s")
3388 % self._suffix)
3389 % self._suffix)
3389 raise error.Abort(msg)
3390 raise error.Abort(msg)
3390
3391
3391 self._repo.dirstate._clearbackup(self._repo.currenttransaction(),
3392 self._repo.dirstate._clearbackup(self._repo.currenttransaction(),
3392 self._suffix)
3393 self._suffix)
3393 self._active = False
3394 self._active = False
3394 self._closed = True
3395 self._closed = True
3395
3396
3396 def _abort(self):
3397 def _abort(self):
3397 self._repo.dirstate._restorebackup(self._repo.currenttransaction(),
3398 self._repo.dirstate._restorebackup(self._repo.currenttransaction(),
3398 self._suffix)
3399 self._suffix)
3399 self._active = False
3400 self._active = False
3400
3401
3401 def release(self):
3402 def release(self):
3402 if not self._closed:
3403 if not self._closed:
3403 if not self._active: # already inactivated
3404 if not self._active: # already inactivated
3404 msg = (_("can't release already inactivated backup:"
3405 msg = (_("can't release already inactivated backup:"
3405 " dirstate%s")
3406 " dirstate%s")
3406 % self._suffix)
3407 % self._suffix)
3407 raise error.Abort(msg)
3408 raise error.Abort(msg)
3408 self._abort()
3409 self._abort()
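# A minimal usage sketch for the guard above ('operation' is an arbitrary
# label and the mutation step is a placeholder):
#
#   dsguard = dirstateguard(repo, 'operation')
#   try:
#       # ... mutate repo.dirstate here ...
#       dsguard.close()    # success: just drop the backup
#   finally:
#       dsguard.release()  # if close() was not reached, restore the backup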
@@ -1,1648 +1,1651
1 # stuff related specifically to patch manipulation / parsing
1 # stuff related specifically to patch manipulation / parsing
2 #
2 #
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # This code is based on the Mark Edgington's crecord extension.
8 # This code is based on the Mark Edgington's crecord extension.
9 # (Itself based on Bryan O'Sullivan's record extension.)
9 # (Itself based on Bryan O'Sullivan's record extension.)
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 import cStringIO
13 import cStringIO
14 import locale
14 import locale
15 import os
15 import os
16 import re
16 import re
17 import signal
17 import signal
18 import struct
18 import struct
19 import sys
19 import sys
20 import tempfile
20 import tempfile
21
21
22 from .i18n import _
22 from .i18n import _
23 from . import (
23 from . import (
24 encoding,
24 encoding,
25 error,
25 error,
26 patch as patchmod,
26 patch as patchmod,
27 )
27 )
28
28
29 # This is required for ncurses to display non-ASCII characters in default user
29 # This is required for ncurses to display non-ASCII characters in default user
30 # locale encoding correctly. --immerrr
30 # locale encoding correctly. --immerrr
31 locale.setlocale(locale.LC_ALL, '')
31 locale.setlocale(locale.LC_ALL, '')
32
32
33 # os.name is one of: 'posix', 'nt', 'dos', 'os2', 'mac', or 'ce'
33 # os.name is one of: 'posix', 'nt', 'dos', 'os2', 'mac', or 'ce'
34 if os.name == 'posix':
34 if os.name == 'posix':
35 import curses
35 import curses
36 import fcntl
36 import fcntl
37 import termios
37 import termios
38 else:
38 else:
39 # I have no idea if wcurses works with crecord...
39 # I have no idea if wcurses works with crecord...
40 try:
40 try:
41 import wcurses as curses
41 import wcurses as curses
42 except ImportError:
42 except ImportError:
43 # wcurses is not shipped on Windows by default
43 # wcurses is not shipped on Windows by default
44 pass
44 pass
45
45
46 try:
46 try:
47 curses
47 curses
48 except NameError:
48 except NameError:
49 if os.name != 'nt': # Temporary hack to get running on Windows again
49 if os.name != 'nt': # Temporary hack to get running on Windows again
50 raise error.Abort(
50 raise error.Abort(
51 _('the python curses/wcurses module is not available/installed'))
51 _('the python curses/wcurses module is not available/installed'))
52
52
53 _origstdout = sys.__stdout__ # used by gethw()
53 _origstdout = sys.__stdout__ # used by gethw()
54
54
55 class patchnode(object):
55 class patchnode(object):
56 """abstract class for patch graph nodes
56 """abstract class for patch graph nodes
57 (i.e. patchroot, header, hunk, hunkline)
57 (i.e. patchroot, header, hunk, hunkline)
58 """
58 """
59
59
60 def firstchild(self):
60 def firstchild(self):
61 raise NotImplementedError("method must be implemented by subclass")
61 raise NotImplementedError("method must be implemented by subclass")
62
62
63 def lastchild(self):
63 def lastchild(self):
64 raise NotImplementedError("method must be implemented by subclass")
64 raise NotImplementedError("method must be implemented by subclass")
65
65
66 def allchildren(self):
66 def allchildren(self):
67 "Return a list of all of the direct children of this node"
67 "Return a list of all of the direct children of this node"
68 raise NotImplementedError("method must be implemented by subclass")
68 raise NotImplementedError("method must be implemented by subclass")
69 def nextsibling(self):
69 def nextsibling(self):
70 """
70 """
71 Return the closest next item of the same type where there are no items
71 Return the closest next item of the same type where there are no items
72 of different types between the current item and this closest item.
72 of different types between the current item and this closest item.
73 If no such item exists, return None.
73 If no such item exists, return None.
74
74
75 """
75 """
76 raise NotImplementedError("method must be implemented by subclass")
76 raise NotImplementedError("method must be implemented by subclass")
77
77
78 def prevsibling(self):
78 def prevsibling(self):
79 """
79 """
80 Return the closest previous item of the same type where there are no
80 Return the closest previous item of the same type where there are no
81 items of different types between the current item and this closest item.
81 items of different types between the current item and this closest item.
82 If no such item exists, return None.
82 If no such item exists, return None.
83
83
84 """
84 """
85 raise NotImplementedError("method must be implemented by subclass")
85 raise NotImplementedError("method must be implemented by subclass")
86
86
87 def parentitem(self):
87 def parentitem(self):
88 raise NotImplementedError("method must be implemented by subclass")
88 raise NotImplementedError("method must be implemented by subclass")
89
89
90
90
91 def nextitem(self, constrainlevel=True, skipfolded=True):
91 def nextitem(self, constrainlevel=True, skipfolded=True):
92 """
92 """
93 If constrainlevel == True, return the closest next item
93 If constrainlevel == True, return the closest next item
94 of the same type where there are no items of different types between
94 of the same type where there are no items of different types between
95 the current item and this closest item.
95 the current item and this closest item.
96
96
97 If constrainlevel == False, then try to return the next item
97 If constrainlevel == False, then try to return the next item
98 closest to this item, regardless of item's type (header, hunk, or
98 closest to this item, regardless of item's type (header, hunk, or
99 HunkLine).
99 HunkLine).
100
100
101 If skipfolded == True, and the current item is folded, then the child
101 If skipfolded == True, and the current item is folded, then the child
102 items that are hidden due to folding will be skipped when determining
102 items that are hidden due to folding will be skipped when determining
103 the next item.
103 the next item.
104
104
105 If it is not possible to get the next item, return None.
105 If it is not possible to get the next item, return None.
106
106
107 """
107 """
108 try:
108 try:
109 itemfolded = self.folded
109 itemfolded = self.folded
110 except AttributeError:
110 except AttributeError:
111 itemfolded = False
111 itemfolded = False
112 if constrainlevel:
112 if constrainlevel:
113 return self.nextsibling()
113 return self.nextsibling()
114 elif skipfolded and itemfolded:
114 elif skipfolded and itemfolded:
115 nextitem = self.nextsibling()
115 nextitem = self.nextsibling()
116 if nextitem is None:
116 if nextitem is None:
117 try:
117 try:
118 nextitem = self.parentitem().nextsibling()
118 nextitem = self.parentitem().nextsibling()
119 except AttributeError:
119 except AttributeError:
120 nextitem = None
120 nextitem = None
121 return nextitem
121 return nextitem
122 else:
122 else:
123 # try child
123 # try child
124 item = self.firstchild()
124 item = self.firstchild()
125 if item is not None:
125 if item is not None:
126 return item
126 return item
127
127
128 # else try next sibling
128 # else try next sibling
129 item = self.nextsibling()
129 item = self.nextsibling()
130 if item is not None:
130 if item is not None:
131 return item
131 return item
132
132
133 try:
133 try:
134 # else try parent's next sibling
134 # else try parent's next sibling
135 item = self.parentitem().nextsibling()
135 item = self.parentitem().nextsibling()
136 if item is not None:
136 if item is not None:
137 return item
137 return item
138
138
139 # else return grandparent's next sibling (or None)
139 # else return grandparent's next sibling (or None)
140 return self.parentitem().parentitem().nextsibling()
140 return self.parentitem().parentitem().nextsibling()
141
141
142 except AttributeError: # parent and/or grandparent was None
142 except AttributeError: # parent and/or grandparent was None
143 return None
143 return None
144
144
145 def previtem(self, constrainlevel=True, skipfolded=True):
145 def previtem(self, constrainlevel=True, skipfolded=True):
146 """
146 """
147 If constrainlevel == True, return the closest previous item
147 If constrainlevel == True, return the closest previous item
148 of the same type where there are no items of different types between
148 of the same type where there are no items of different types between
149 the current item and this closest item.
149 the current item and this closest item.
150
150
151 If constrainlevel == False, then try to return the previous item
151 If constrainlevel == False, then try to return the previous item
152 closest to this item, regardless of item's type (header, hunk, or
152 closest to this item, regardless of item's type (header, hunk, or
153 HunkLine).
153 HunkLine).
154
154
155 If skipfolded == True, and the current item is folded, then the items
155 If skipfolded == True, and the current item is folded, then the items
156 that are hidden due to folding will be skipped when determining the
156 that are hidden due to folding will be skipped when determining the
157 previous item.
157 previous item.
158
158
159 If it is not possible to get the previous item, return None.
159 If it is not possible to get the previous item, return None.
160
160
161 """
161 """
162 if constrainlevel:
162 if constrainlevel:
163 return self.prevsibling()
163 return self.prevsibling()
164 else:
164 else:
165 # try previous sibling's last child's last child,
165 # try previous sibling's last child's last child,
166 # else try previous sibling's last child, else try previous sibling
166 # else try previous sibling's last child, else try previous sibling
167 prevsibling = self.prevsibling()
167 prevsibling = self.prevsibling()
168 if prevsibling is not None:
168 if prevsibling is not None:
169 prevsiblinglastchild = prevsibling.lastchild()
169 prevsiblinglastchild = prevsibling.lastchild()
170 if ((prevsiblinglastchild is not None) and
170 if ((prevsiblinglastchild is not None) and
171 not prevsibling.folded):
171 not prevsibling.folded):
172 prevsiblinglclc = prevsiblinglastchild.lastchild()
172 prevsiblinglclc = prevsiblinglastchild.lastchild()
173 if ((prevsiblinglclc is not None) and
173 if ((prevsiblinglclc is not None) and
174 not prevsiblinglastchild.folded):
174 not prevsiblinglastchild.folded):
175 return prevsiblinglclc
175 return prevsiblinglclc
176 else:
176 else:
177 return prevsiblinglastchild
177 return prevsiblinglastchild
178 else:
178 else:
179 return prevsibling
179 return prevsibling
180
180
181 # try parent (or None)
181 # try parent (or None)
182 return self.parentitem()
182 return self.parentitem()
183
183
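The traversal rules spelled out in the nextitem/previtem docstrings above reduce to a depth-first walk over the header/hunk/line tree. The sketch below is an illustration only, not code from this changeset: the node class and names are hypothetical, it ignores folding, and it climbs as many levels as needed, whereas the methods above stop at the grandparent.

# hypothetical stand-in for the patchnode hierarchy, illustration only
class node(object):
    def __init__(self, name, parent=None):
        self.name, self.parent, self.children = name, parent, []
        if parent is not None:
            parent.children.append(self)
    def nextsibling(self):
        if self.parent is None:
            return None
        sibs = self.parent.children
        i = sibs.index(self)
        return sibs[i + 1] if i + 1 < len(sibs) else None
    def nextitem(self):
        if self.children:               # first try to descend to a child
            return self.children[0]
        cur = self
        while cur is not None:          # else walk forward, climbing as needed
            sib = cur.nextsibling()
            if sib is not None:
                return sib
            cur = cur.parent
        return None

root = node('patch')
hdr1 = node('header1', root)
hunk1 = node('hunk1', hdr1)
line1 = node('line1', hunk1)
hdr2 = node('header2', root)
assert hdr1.nextitem() is hunk1         # header descends to its first hunk
assert hunk1.nextitem() is line1        # hunk descends to its first line
assert line1.nextitem() is hdr2         # last line climbs to the next header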
184 class patch(patchnode, list): # todo: rename patchroot
184 class patch(patchnode, list): # todo: rename patchroot
185 """
185 """
186 list of header objects representing the patch.
186 list of header objects representing the patch.
187
187
188 """
188 """
189 def __init__(self, headerlist):
189 def __init__(self, headerlist):
190 self.extend(headerlist)
190 self.extend(headerlist)
191 # add parent patch object reference to each header
191 # add parent patch object reference to each header
192 for header in self:
192 for header in self:
193 header.patch = self
193 header.patch = self
194
194
195 class uiheader(patchnode):
195 class uiheader(patchnode):
196 """patch header
196 """patch header
197
197
198 xxx shouldn't we move this to mercurial/patch.py ?
198 xxx shouldn't we move this to mercurial/patch.py ?
199 """
199 """
200
200
201 def __init__(self, header):
201 def __init__(self, header):
202 self.nonuiheader = header
202 self.nonuiheader = header
203 # flag to indicate whether to apply this chunk
203 # flag to indicate whether to apply this chunk
204 self.applied = True
204 self.applied = True
205 # flag which only affects the status display indicating if a node's
205 # flag which only affects the status display indicating if a node's
206 # children are partially applied (i.e. some applied, some not).
206 # children are partially applied (i.e. some applied, some not).
207 self.partial = False
207 self.partial = False
208
208
209 # flag to indicate whether to display as folded/unfolded to user
209 # flag to indicate whether to display as folded/unfolded to user
210 self.folded = True
210 self.folded = True
211
211
212 # list of all headers in patch
212 # list of all headers in patch
213 self.patch = None
213 self.patch = None
214
214
215 # flag is False if this header was ever unfolded from initial state
215 # flag is False if this header was ever unfolded from initial state
216 self.neverunfolded = True
216 self.neverunfolded = True
217 self.hunks = [uihunk(h, self) for h in self.hunks]
217 self.hunks = [uihunk(h, self) for h in self.hunks]
218
218
219
219
220 def prettystr(self):
220 def prettystr(self):
221 x = cStringIO.StringIO()
221 x = cStringIO.StringIO()
222 self.pretty(x)
222 self.pretty(x)
223 return x.getvalue()
223 return x.getvalue()
224
224
225 def nextsibling(self):
225 def nextsibling(self):
226 numheadersinpatch = len(self.patch)
226 numheadersinpatch = len(self.patch)
227 indexofthisheader = self.patch.index(self)
227 indexofthisheader = self.patch.index(self)
228
228
229 if indexofthisheader < numheadersinpatch - 1:
229 if indexofthisheader < numheadersinpatch - 1:
230 nextheader = self.patch[indexofthisheader + 1]
230 nextheader = self.patch[indexofthisheader + 1]
231 return nextheader
231 return nextheader
232 else:
232 else:
233 return None
233 return None
234
234
235 def prevsibling(self):
235 def prevsibling(self):
236 indexofthisheader = self.patch.index(self)
236 indexofthisheader = self.patch.index(self)
237 if indexofthisheader > 0:
237 if indexofthisheader > 0:
238 previousheader = self.patch[indexofthisheader - 1]
238 previousheader = self.patch[indexofthisheader - 1]
239 return previousheader
239 return previousheader
240 else:
240 else:
241 return None
241 return None
242
242
243 def parentitem(self):
243 def parentitem(self):
244 """
244 """
245 there is no 'real' parent item of a header that can be selected,
245 there is no 'real' parent item of a header that can be selected,
246 so return None.
246 so return None.
247 """
247 """
248 return None
248 return None
249
249
250 def firstchild(self):
250 def firstchild(self):
251 "return the first child of this item, if one exists. otherwise None."
251 "return the first child of this item, if one exists. otherwise None."
252 if len(self.hunks) > 0:
252 if len(self.hunks) > 0:
253 return self.hunks[0]
253 return self.hunks[0]
254 else:
254 else:
255 return None
255 return None
256
256
257 def lastchild(self):
257 def lastchild(self):
258 "return the last child of this item, if one exists. otherwise None."
258 "return the last child of this item, if one exists. otherwise None."
259 if len(self.hunks) > 0:
259 if len(self.hunks) > 0:
260 return self.hunks[-1]
260 return self.hunks[-1]
261 else:
261 else:
262 return None
262 return None
263
263
264 def allchildren(self):
264 def allchildren(self):
265 "return a list of all of the direct children of this node"
265 "return a list of all of the direct children of this node"
266 return self.hunks
266 return self.hunks
267
267
268 def __getattr__(self, name):
268 def __getattr__(self, name):
269 return getattr(self.nonuiheader, name)
269 return getattr(self.nonuiheader, name)
270
270
271 class uihunkline(patchnode):
271 class uihunkline(patchnode):
272 "represents a changed line in a hunk"
272 "represents a changed line in a hunk"
273 def __init__(self, linetext, hunk):
273 def __init__(self, linetext, hunk):
274 self.linetext = linetext
274 self.linetext = linetext
275 self.applied = True
275 self.applied = True
276 # the parent hunk to which this line belongs
276 # the parent hunk to which this line belongs
277 self.hunk = hunk
277 self.hunk = hunk
278 # folding lines currently is not used/needed, but this flag is needed
278 # folding lines currently is not used/needed, but this flag is needed
279 # in the previtem method.
279 # in the previtem method.
280 self.folded = False
280 self.folded = False
281
281
282 def prettystr(self):
282 def prettystr(self):
283 return self.linetext
283 return self.linetext
284
284
285 def nextsibling(self):
285 def nextsibling(self):
286 numlinesinhunk = len(self.hunk.changedlines)
286 numlinesinhunk = len(self.hunk.changedlines)
287 indexofthisline = self.hunk.changedlines.index(self)
287 indexofthisline = self.hunk.changedlines.index(self)
288
288
289 if (indexofthisline < numlinesinhunk - 1):
289 if (indexofthisline < numlinesinhunk - 1):
290 nextline = self.hunk.changedlines[indexofthisline + 1]
290 nextline = self.hunk.changedlines[indexofthisline + 1]
291 return nextline
291 return nextline
292 else:
292 else:
293 return None
293 return None
294
294
295 def prevsibling(self):
295 def prevsibling(self):
296 indexofthisline = self.hunk.changedlines.index(self)
296 indexofthisline = self.hunk.changedlines.index(self)
297 if indexofthisline > 0:
297 if indexofthisline > 0:
298 previousline = self.hunk.changedlines[indexofthisline - 1]
298 previousline = self.hunk.changedlines[indexofthisline - 1]
299 return previousline
299 return previousline
300 else:
300 else:
301 return None
301 return None
302
302
303 def parentitem(self):
303 def parentitem(self):
304 "return the parent to the current item"
304 "return the parent to the current item"
305 return self.hunk
305 return self.hunk
306
306
307 def firstchild(self):
307 def firstchild(self):
308 "return the first child of this item, if one exists. otherwise None."
308 "return the first child of this item, if one exists. otherwise None."
309 # hunk-lines don't have children
309 # hunk-lines don't have children
310 return None
310 return None
311
311
312 def lastchild(self):
312 def lastchild(self):
313 "return the last child of this item, if one exists. otherwise None."
313 "return the last child of this item, if one exists. otherwise None."
314 # hunk-lines don't have children
314 # hunk-lines don't have children
315 return None
315 return None
316
316
317 class uihunk(patchnode):
317 class uihunk(patchnode):
318 """ui patch hunk, wraps a hunk and keep track of ui behavior """
318 """ui patch hunk, wraps a hunk and keep track of ui behavior """
319 maxcontext = 3
319 maxcontext = 3
320
320
321 def __init__(self, hunk, header):
321 def __init__(self, hunk, header):
322 self._hunk = hunk
322 self._hunk = hunk
323 self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
323 self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
324 self.header = header
324 self.header = header
325 # used at end for detecting how many removed lines were un-applied
325 # used at end for detecting how many removed lines were un-applied
326 self.originalremoved = self.removed
326 self.originalremoved = self.removed
327
327
328 # flag to indicate whether to display as folded/unfolded to user
328 # flag to indicate whether to display as folded/unfolded to user
329 self.folded = True
329 self.folded = True
330 # flag to indicate whether to apply this chunk
330 # flag to indicate whether to apply this chunk
331 self.applied = True
331 self.applied = True
332 # flag which only affects the status display indicating if a node's
332 # flag which only affects the status display indicating if a node's
333 # children are partially applied (i.e. some applied, some not).
333 # children are partially applied (i.e. some applied, some not).
334 self.partial = False
334 self.partial = False
335
335
336 def nextsibling(self):
336 def nextsibling(self):
337 numhunksinheader = len(self.header.hunks)
337 numhunksinheader = len(self.header.hunks)
338 indexofthishunk = self.header.hunks.index(self)
338 indexofthishunk = self.header.hunks.index(self)
339
339
340 if (indexofthishunk < numhunksinheader - 1):
340 if (indexofthishunk < numhunksinheader - 1):
341 nexthunk = self.header.hunks[indexofthishunk + 1]
341 nexthunk = self.header.hunks[indexofthishunk + 1]
342 return nexthunk
342 return nexthunk
343 else:
343 else:
344 return None
344 return None
345
345
346 def prevsibling(self):
346 def prevsibling(self):
347 indexofthishunk = self.header.hunks.index(self)
347 indexofthishunk = self.header.hunks.index(self)
348 if indexofthishunk > 0:
348 if indexofthishunk > 0:
349 previoushunk = self.header.hunks[indexofthishunk - 1]
349 previoushunk = self.header.hunks[indexofthishunk - 1]
350 return previoushunk
350 return previoushunk
351 else:
351 else:
352 return None
352 return None
353
353
354 def parentitem(self):
354 def parentitem(self):
355 "return the parent to the current item"
355 "return the parent to the current item"
356 return self.header
356 return self.header
357
357
358 def firstchild(self):
358 def firstchild(self):
359 "return the first child of this item, if one exists. otherwise None."
359 "return the first child of this item, if one exists. otherwise None."
360 if len(self.changedlines) > 0:
360 if len(self.changedlines) > 0:
361 return self.changedlines[0]
361 return self.changedlines[0]
362 else:
362 else:
363 return None
363 return None
364
364
365 def lastchild(self):
365 def lastchild(self):
366 "return the last child of this item, if one exists. otherwise None."
366 "return the last child of this item, if one exists. otherwise None."
367 if len(self.changedlines) > 0:
367 if len(self.changedlines) > 0:
368 return self.changedlines[-1]
368 return self.changedlines[-1]
369 else:
369 else:
370 return None
370 return None
371
371
372 def allchildren(self):
372 def allchildren(self):
373 "return a list of all of the direct children of this node"
373 "return a list of all of the direct children of this node"
374 return self.changedlines
374 return self.changedlines
375 def countchanges(self):
375 def countchanges(self):
376 """changedlines -> (n+,n-)"""
376 """changedlines -> (n+,n-)"""
377 add = len([l for l in self.changedlines if l.applied
377 add = len([l for l in self.changedlines if l.applied
378 and l.prettystr()[0] == '+'])
378 and l.prettystr()[0] == '+'])
379 rem = len([l for l in self.changedlines if l.applied
379 rem = len([l for l in self.changedlines if l.applied
380 and l.prettystr()[0] == '-'])
380 and l.prettystr()[0] == '-'])
381 return add, rem
381 return add, rem
382
382
383 def getfromtoline(self):
383 def getfromtoline(self):
384 # calculate the number of removed lines converted to context lines
384 # calculate the number of removed lines converted to context lines
385 removedconvertedtocontext = self.originalremoved - self.removed
385 removedconvertedtocontext = self.originalremoved - self.removed
386
386
387 contextlen = (len(self.before) + len(self.after) +
387 contextlen = (len(self.before) + len(self.after) +
388 removedconvertedtocontext)
388 removedconvertedtocontext)
389 if self.after and self.after[-1] == '\\ no newline at end of file\n':
389 if self.after and self.after[-1] == '\\ no newline at end of file\n':
390 contextlen -= 1
390 contextlen -= 1
391 fromlen = contextlen + self.removed
391 fromlen = contextlen + self.removed
392 tolen = contextlen + self.added
392 tolen = contextlen + self.added
393
393
394 # diffutils manual, section "2.2.2.2 detailed description of unified
394 # diffutils manual, section "2.2.2.2 detailed description of unified
395 # format": "an empty hunk is considered to end at the line that
395 # format": "an empty hunk is considered to end at the line that
396 # precedes the hunk."
396 # precedes the hunk."
397 #
397 #
398 # so, if either of hunks is empty, decrease its line start. --immerrr
398 # so, if either of hunks is empty, decrease its line start. --immerrr
399 # but only do this if fromline > 0, to avoid having, e.g., fromline=-1.
399 # but only do this if fromline > 0, to avoid having, e.g., fromline=-1.
400 fromline, toline = self.fromline, self.toline
400 fromline, toline = self.fromline, self.toline
401 if fromline != 0:
401 if fromline != 0:
402 if fromlen == 0:
402 if fromlen == 0:
403 fromline -= 1
403 fromline -= 1
404 if tolen == 0:
404 if tolen == 0:
405 toline -= 1
405 toline -= 1
406
406
407 fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
407 fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
408 fromline, fromlen, toline, tolen,
408 fromline, fromlen, toline, tolen,
409 self.proc and (' ' + self.proc))
409 self.proc and (' ' + self.proc))
410 return fromtoline
410 return fromtoline
411
411
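A worked example of the @@-line arithmetic in getfromtoline() above, on made-up numbers (not part of the changeset): one removed line has been toggled off, so it is counted as context.

before, after = 3, 2                   # unchanged context around the hunk
originalremoved, removed, added = 3, 2, 1
removedconvertedtocontext = originalremoved - removed     # 1 unapplied removal
contextlen = before + after + removedconvertedtocontext   # 6
fromlen = contextlen + removed                             # 8
tolen = contextlen + added                                 # 7
print('@@ -%d,%d +%d,%d @@' % (10, fromlen, 10, tolen))
# prints: @@ -10,8 +10,7 @@   (the fromline/toline of 10 are arbitrary here)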
412 def write(self, fp):
412 def write(self, fp):
413 # update self.added/removed, which are used by getfromtoline()
413 # update self.added/removed, which are used by getfromtoline()
414 self.added, self.removed = self.countchanges()
414 self.added, self.removed = self.countchanges()
415 fp.write(self.getfromtoline())
415 fp.write(self.getfromtoline())
416
416
417 hunklinelist = []
417 hunklinelist = []
418 # add the following to the list: (1) all applied lines, and
418 # add the following to the list: (1) all applied lines, and
419 # (2) all unapplied removal lines (convert these to context lines)
419 # (2) all unapplied removal lines (convert these to context lines)
420 for changedline in self.changedlines:
420 for changedline in self.changedlines:
421 changedlinestr = changedline.prettystr()
421 changedlinestr = changedline.prettystr()
422 if changedline.applied:
422 if changedline.applied:
423 hunklinelist.append(changedlinestr)
423 hunklinelist.append(changedlinestr)
424 elif changedlinestr[0] == "-":
424 elif changedlinestr[0] == "-":
425 hunklinelist.append(" " + changedlinestr[1:])
425 hunklinelist.append(" " + changedlinestr[1:])
426
426
427 fp.write(''.join(self.before + hunklinelist + self.after))
427 fp.write(''.join(self.before + hunklinelist + self.after))
428
428
429 pretty = write
429 pretty = write
430
430
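A standalone sketch of the filtering performed by write() above (illustration only): applied lines are emitted as-is, unapplied removals are demoted to context lines, and unapplied additions are dropped.

changed = [('+new line\n', True),      # applied addition -> kept
           ('-old line\n', False),     # unapplied removal -> becomes context
           ('+skipped\n', False)]      # unapplied addition -> dropped
out = []
for text, applied in changed:
    if applied:
        out.append(text)
    elif text[0] == '-':
        out.append(' ' + text[1:])
print(''.join(out))                    # '+new line' then ' old line'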
431 def prettystr(self):
431 def prettystr(self):
432 x = cStringIO.StringIO()
432 x = cStringIO.StringIO()
433 self.pretty(x)
433 self.pretty(x)
434 return x.getvalue()
434 return x.getvalue()
435
435
436 def __getattr__(self, name):
436 def __getattr__(self, name):
437 return getattr(self._hunk, name)
437 return getattr(self._hunk, name)
438 def __repr__(self):
438 def __repr__(self):
439 return '<hunk %r@%d>' % (self.filename(), self.fromline)
439 return '<hunk %r@%d>' % (self.filename(), self.fromline)
440
440
441 def filterpatch(ui, chunks, chunkselector, operation=None):
441 def filterpatch(ui, chunks, chunkselector, operation=None):
442 """interactively filter patch chunks into applied-only chunks"""
442 """interactively filter patch chunks into applied-only chunks"""
443
443
444 if operation is None:
444 if operation is None:
445 operation = _('confirm')
445 operation = _('confirm')
446 chunks = list(chunks)
446 chunks = list(chunks)
447 # convert chunks list into structure suitable for displaying/modifying
447 # convert chunks list into structure suitable for displaying/modifying
448 # with curses. create a list of headers only.
448 # with curses. create a list of headers only.
449 headers = [c for c in chunks if isinstance(c, patchmod.header)]
449 headers = [c for c in chunks if isinstance(c, patchmod.header)]
450
450
451 # if there are no changed files
451 # if there are no changed files
452 if len(headers) == 0:
452 if len(headers) == 0:
453 return []
453 return []
454 uiheaders = [uiheader(h) for h in headers]
454 uiheaders = [uiheader(h) for h in headers]
455 # let user choose headers/hunks/lines, and mark their applied flags
455 # let user choose headers/hunks/lines, and mark their applied flags
456 # accordingly
456 # accordingly
457 chunkselector(ui, uiheaders)
457 ret = chunkselector(ui, uiheaders)
458 appliedhunklist = []
458 appliedhunklist = []
459 for hdr in uiheaders:
459 for hdr in uiheaders:
460 if (hdr.applied and
460 if (hdr.applied and
461 (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
461 (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
462 appliedhunklist.append(hdr)
462 appliedhunklist.append(hdr)
463 fixoffset = 0
463 fixoffset = 0
464 for hnk in hdr.hunks:
464 for hnk in hdr.hunks:
465 if hnk.applied:
465 if hnk.applied:
466 appliedhunklist.append(hnk)
466 appliedhunklist.append(hnk)
467 # adjust the 'to'-line offset of the hunk to be correct
467 # adjust the 'to'-line offset of the hunk to be correct
468 # after de-activating some of the other hunks for this file
468 # after de-activating some of the other hunks for this file
469 if fixoffset:
469 if fixoffset:
470 #hnk = copy.copy(hnk) # necessary??
470 #hnk = copy.copy(hnk) # necessary??
471 hnk.toline += fixoffset
471 hnk.toline += fixoffset
472 else:
472 else:
473 fixoffset += hnk.removed - hnk.added
473 fixoffset += hnk.removed - hnk.added
474
474
475 return appliedhunklist
475 return (appliedhunklist, ret)
476
476
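With this change filterpatch() returns a pair rather than a bare list, so callers have to unpack it. A hedged caller-side sketch follows; 'ui' and 'originalchunks' stand in for the caller's real objects and are not defined in this changeset.

def dummyselector(ui, uiheaders):
    # a selector marks .applied flags on the uiheaders and may report extra
    # information; the curses selector below now returns its 'opts' dictionary
    return {}

# chunks, ret = filterpatch(ui, originalchunks, dummyselector)
# 'chunks' is the applied-only list as before; 'ret' carries whatever the
# selector returned (an empty dict here, and {} by default for the curses UI).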
477 def gethw():
477 def gethw():
478 """
478 """
479 magically get the current height and width of the window (without initscr)
479 magically get the current height and width of the window (without initscr)
480
480
481 this is a rip-off of a rip-off - taken from the bpython code. it is
481 this is a rip-off of a rip-off - taken from the bpython code. it is
482 useful / necessary because otherwise curses.initscr() must be called,
482 useful / necessary because otherwise curses.initscr() must be called,
483 which can leave the terminal in a nasty state after exiting.
483 which can leave the terminal in a nasty state after exiting.
484
484
485 """
485 """
486 h, w = struct.unpack(
486 h, w = struct.unpack(
487 "hhhh", fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8))[0:2]
487 "hhhh", fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8))[0:2]
488 return h, w
488 return h, w
489
489
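gethw() above unpacks the kernel's winsize struct directly so that curses.initscr() is not needed. A self-contained version of the same trick, assuming a POSIX tty on the chosen file descriptor:

import fcntl, struct, sys, termios

def terminalsize(fd=None):
    # struct winsize is four shorts (rows, cols, xpixel, ypixel); only the
    # first two are of interest here
    if fd is None:
        fd = sys.stdout.fileno()
    rows, cols = struct.unpack(
        'hhhh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'\000' * 8))[0:2]
    return rows, cols

# terminalsize() -> e.g. (50, 132) when run in an interactive terminal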
490 def chunkselector(ui, headerlist):
490 def chunkselector(ui, headerlist):
491 """
491 """
492 curses interface to get selection of chunks, and mark the applied flags
492 curses interface to get selection of chunks, and mark the applied flags
493 of the chosen chunks.
493 of the chosen chunks.
494
494
495 """
495 """
496 ui.write(_('starting interactive selection\n'))
496 ui.write(_('starting interactive selection\n'))
497 chunkselector = curseschunkselector(headerlist, ui)
497 chunkselector = curseschunkselector(headerlist, ui)
498 f = signal.getsignal(signal.SIGTSTP)
498 f = signal.getsignal(signal.SIGTSTP)
499 curses.wrapper(chunkselector.main)
499 curses.wrapper(chunkselector.main)
500 if chunkselector.initerr is not None:
500 if chunkselector.initerr is not None:
501 raise error.Abort(chunkselector.initerr)
501 raise error.Abort(chunkselector.initerr)
502 # ncurses does not restore signal handler for SIGTSTP
502 # ncurses does not restore signal handler for SIGTSTP
503 signal.signal(signal.SIGTSTP, f)
503 signal.signal(signal.SIGTSTP, f)
504 return chunkselector.opts
504
505
505 def testdecorator(testfn, f):
506 def testdecorator(testfn, f):
506 def u(*args, **kwargs):
507 def u(*args, **kwargs):
507 return f(testfn, *args, **kwargs)
508 return f(testfn, *args, **kwargs)
508 return u
509 return u
509
510
510 def testchunkselector(testfn, ui, headerlist):
511 def testchunkselector(testfn, ui, headerlist):
511 """
512 """
512 test interface to get selection of chunks, and mark the applied flags
513 test interface to get selection of chunks, and mark the applied flags
513 of the chosen chunks.
514 of the chosen chunks.
514
515
515 """
516 """
516 chunkselector = curseschunkselector(headerlist, ui)
517 chunkselector = curseschunkselector(headerlist, ui)
517 if testfn and os.path.exists(testfn):
518 if testfn and os.path.exists(testfn):
518 testf = open(testfn)
519 testf = open(testfn)
519 testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
520 testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
520 testf.close()
521 testf.close()
521 while True:
522 while True:
522 if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
523 if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
523 break
524 break
525 return chunkselector.opts
524
526
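testchunkselector() above replays one command per line from testfn. The snippet below is an illustration only: the command vocabulary is defined by handlekeypressed(), which is outside this hunk, and the two keys used here ('A' to toggle everything, 'c' to confirm) are taken from the status-line legend drawn by updatescreen() further down, so treat them as an assumption.

with open('crecord-test-commands', 'w') as f:
    f.write('A\n')        # assumed: toggle all hunks
    f.write('c\n')        # assumed: confirm the selection
# the path of this file would then be supplied to testchunkselector as testfn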
525 class curseschunkselector(object):
527 class curseschunkselector(object):
526 def __init__(self, headerlist, ui):
528 def __init__(self, headerlist, ui):
527 # put the headers into a patch object
529 # put the headers into a patch object
528 self.headerlist = patch(headerlist)
530 self.headerlist = patch(headerlist)
529
531
530 self.ui = ui
532 self.ui = ui
533 self.opts = {}
531
534
532 self.errorstr = None
535 self.errorstr = None
533 # list of all chunks
536 # list of all chunks
534 self.chunklist = []
537 self.chunklist = []
535 for h in headerlist:
538 for h in headerlist:
536 self.chunklist.append(h)
539 self.chunklist.append(h)
537 self.chunklist.extend(h.hunks)
540 self.chunklist.extend(h.hunks)
538
541
539 # dictionary mapping (fgcolor, bgcolor) pairs to the
542 # dictionary mapping (fgcolor, bgcolor) pairs to the
540 # corresponding curses color-pair value.
543 # corresponding curses color-pair value.
541 self.colorpairs = {}
544 self.colorpairs = {}
542 # maps custom nicknames of color-pairs to curses color-pair values
545 # maps custom nicknames of color-pairs to curses color-pair values
543 self.colorpairnames = {}
546 self.colorpairnames = {}
544
547
545 # the currently selected header, hunk, or hunk-line
548 # the currently selected header, hunk, or hunk-line
546 self.currentselecteditem = self.headerlist[0]
549 self.currentselecteditem = self.headerlist[0]
547
550
548 # updated when printing out patch-display -- the 'lines' here are the
551 # updated when printing out patch-display -- the 'lines' here are the
549 # line positions *in the pad*, not on the screen.
552 # line positions *in the pad*, not on the screen.
550 self.selecteditemstartline = 0
553 self.selecteditemstartline = 0
551 self.selecteditemendline = None
554 self.selecteditemendline = None
552
555
553 # define indentation levels
556 # define indentation levels
554 self.headerindentnumchars = 0
557 self.headerindentnumchars = 0
555 self.hunkindentnumchars = 3
558 self.hunkindentnumchars = 3
556 self.hunklineindentnumchars = 6
559 self.hunklineindentnumchars = 6
557
560
558 # the first line of the pad to print to the screen
561 # the first line of the pad to print to the screen
559 self.firstlineofpadtoprint = 0
562 self.firstlineofpadtoprint = 0
560
563
561 # keeps track of the number of lines in the pad
564 # keeps track of the number of lines in the pad
562 self.numpadlines = None
565 self.numpadlines = None
563
566
564 self.numstatuslines = 2
567 self.numstatuslines = 2
565
568
566 # keep a running count of the number of lines printed to the pad
569 # keep a running count of the number of lines printed to the pad
567 # (used for determining when the selected item begins/ends)
570 # (used for determining when the selected item begins/ends)
568 self.linesprintedtopadsofar = 0
571 self.linesprintedtopadsofar = 0
569
572
570 # the first line of the pad which is visible on the screen
573 # the first line of the pad which is visible on the screen
571 self.firstlineofpadtoprint = 0
574 self.firstlineofpadtoprint = 0
572
575
573 # stores optional text for a commit comment provided by the user
576 # stores optional text for a commit comment provided by the user
574 self.commenttext = ""
577 self.commenttext = ""
575
578
576 # if the last 'toggle all' command caused all changes to be applied
579 # if the last 'toggle all' command caused all changes to be applied
577 self.waslasttoggleallapplied = True
580 self.waslasttoggleallapplied = True
578
581
579 def uparrowevent(self):
582 def uparrowevent(self):
580 """
583 """
581 try to select the most-indented item just before the current
584 try to select the most-indented item just before the current
582 item. for example, if a hunk is selected, try to select
585 item. for example, if a hunk is selected, try to select
583 the last hunkline of the hunk prior to the selected hunk. or, if
586 the last hunkline of the hunk prior to the selected hunk. or, if
584 the first hunkline of a hunk is currently selected, then select the
587 the first hunkline of a hunk is currently selected, then select the
585 hunk itself.
588 hunk itself.
586
589
587 if the currently selected item is already at the top of the screen,
590 if the currently selected item is already at the top of the screen,
588 scroll the screen down to show the new-selected item.
591 scroll the screen down to show the new-selected item.
589
592
590 """
593 """
591 currentitem = self.currentselecteditem
594 currentitem = self.currentselecteditem
592
595
593 nextitem = currentitem.previtem(constrainlevel=False)
596 nextitem = currentitem.previtem(constrainlevel=False)
594
597
595 if nextitem is None:
598 if nextitem is None:
596 # if no parent item (i.e. currentitem is the first header), then
599 # if no parent item (i.e. currentitem is the first header), then
597 # no change...
600 # no change...
598 nextitem = currentitem
601 nextitem = currentitem
599
602
600 self.currentselecteditem = nextitem
603 self.currentselecteditem = nextitem
601
604
602 def uparrowshiftevent(self):
605 def uparrowshiftevent(self):
603 """
606 """
604 select (if possible) the previous item on the same level as the
607 select (if possible) the previous item on the same level as the
605 currently selected item. otherwise, select (if possible) the
608 currently selected item. otherwise, select (if possible) the
606 parent-item of the currently selected item.
609 parent-item of the currently selected item.
607
610
608 if the currently selected item is already at the top of the screen,
611 if the currently selected item is already at the top of the screen,
609 scroll the screen down to show the new-selected item.
612 scroll the screen down to show the new-selected item.
610
613
611 """
614 """
612 currentitem = self.currentselecteditem
615 currentitem = self.currentselecteditem
613 nextitem = currentitem.previtem()
616 nextitem = currentitem.previtem()
614 # if there's no previous item on this level, try choosing the parent
617 # if there's no previous item on this level, try choosing the parent
615 if nextitem is None:
618 if nextitem is None:
616 nextitem = currentitem.parentitem()
619 nextitem = currentitem.parentitem()
617 if nextitem is None:
620 if nextitem is None:
618 # if no parent item (i.e. currentitem is the first header), then
621 # if no parent item (i.e. currentitem is the first header), then
619 # no change...
622 # no change...
620 nextitem = currentitem
623 nextitem = currentitem
621
624
622 self.currentselecteditem = nextitem
625 self.currentselecteditem = nextitem
623
626
624 def downarrowevent(self):
627 def downarrowevent(self):
625 """
628 """
626 try to select the most-indented item just after the current
629 try to select the most-indented item just after the current
627 item. for example, if a hunk is selected, select
630 item. for example, if a hunk is selected, select
628 the first hunkline of the selected hunk. or, if the last hunkline of
631 the first hunkline of the selected hunk. or, if the last hunkline of
629 a hunk is currently selected, then select the next hunk, if one exists,
632 a hunk is currently selected, then select the next hunk, if one exists,
630 or if not, the next header if one exists.
633 or if not, the next header if one exists.
631
634
632 if the currently selected item is already at the bottom of the screen,
635 if the currently selected item is already at the bottom of the screen,
633 scroll the screen up to show the new-selected item.
636 scroll the screen up to show the new-selected item.
634
637
635 """
638 """
636 #self.startprintline += 1 #debug
639 #self.startprintline += 1 #debug
637 currentitem = self.currentselecteditem
640 currentitem = self.currentselecteditem
638
641
639 nextitem = currentitem.nextitem(constrainlevel=False)
642 nextitem = currentitem.nextitem(constrainlevel=False)
640 # if there's no next item, keep the selection as-is
643 # if there's no next item, keep the selection as-is
641 if nextitem is None:
644 if nextitem is None:
642 nextitem = currentitem
645 nextitem = currentitem
643
646
644 self.currentselecteditem = nextitem
647 self.currentselecteditem = nextitem
645
648
646 def downarrowshiftevent(self):
649 def downarrowshiftevent(self):
647 """
650 """
648 if the cursor is already at the bottom chunk, scroll the screen up and
651 if the cursor is already at the bottom chunk, scroll the screen up and
649 move the cursor-position to the subsequent chunk. otherwise, only move
652 move the cursor-position to the subsequent chunk. otherwise, only move
650 the cursor position down one chunk.
653 the cursor position down one chunk.
651
654
652 """
655 """
653 # todo: update docstring
656 # todo: update docstring
654
657
655 currentitem = self.currentselecteditem
658 currentitem = self.currentselecteditem
656 nextitem = currentitem.nextitem()
659 nextitem = currentitem.nextitem()
657 # if there's no next item on this level, try choosing the parent's
660 # if there's no next item on this level, try choosing the parent's
658 # nextitem.
661 # nextitem.
659 if nextitem is None:
662 if nextitem is None:
660 try:
663 try:
661 nextitem = currentitem.parentitem().nextitem()
664 nextitem = currentitem.parentitem().nextitem()
662 except AttributeError:
665 except AttributeError:
663 # parentitem returned None, so nextitem() can't be called
666 # parentitem returned None, so nextitem() can't be called
664 nextitem = None
667 nextitem = None
665 if nextitem is None:
668 if nextitem is None:
666 # if no next item on parent-level, then no change...
669 # if no next item on parent-level, then no change...
667 nextitem = currentitem
670 nextitem = currentitem
668
671
669 self.currentselecteditem = nextitem
672 self.currentselecteditem = nextitem
670
673
671 def rightarrowevent(self):
674 def rightarrowevent(self):
672 """
675 """
673 select (if possible) the first of this item's child-items.
676 select (if possible) the first of this item's child-items.
674
677
675 """
678 """
676 currentitem = self.currentselecteditem
679 currentitem = self.currentselecteditem
677 nextitem = currentitem.firstchild()
680 nextitem = currentitem.firstchild()
678
681
679 # turn off folding if we want to show a child-item
682 # turn off folding if we want to show a child-item
680 if currentitem.folded:
683 if currentitem.folded:
681 self.togglefolded(currentitem)
684 self.togglefolded(currentitem)
682
685
683 if nextitem is None:
686 if nextitem is None:
684 # if no next item on parent-level, then no change...
687 # if no next item on parent-level, then no change...
685 nextitem = currentitem
688 nextitem = currentitem
686
689
687 self.currentselecteditem = nextitem
690 self.currentselecteditem = nextitem
688
691
689 def leftarrowevent(self):
692 def leftarrowevent(self):
690 """
693 """
691 if the current item can be folded (i.e. it is an unfolded header or
694 if the current item can be folded (i.e. it is an unfolded header or
692 hunk), then fold it. otherwise try to select (if possible) the parent
695 hunk), then fold it. otherwise try to select (if possible) the parent
693 of this item.
696 of this item.
694
697
695 """
698 """
696 currentitem = self.currentselecteditem
699 currentitem = self.currentselecteditem
697
700
698 # try to fold the item
701 # try to fold the item
699 if not isinstance(currentitem, uihunkline):
702 if not isinstance(currentitem, uihunkline):
700 if not currentitem.folded:
703 if not currentitem.folded:
701 self.togglefolded(item=currentitem)
704 self.togglefolded(item=currentitem)
702 return
705 return
703
706
704 # if it can't be folded, try to select the parent item
707 # if it can't be folded, try to select the parent item
705 nextitem = currentitem.parentitem()
708 nextitem = currentitem.parentitem()
706
709
707 if nextitem is None:
710 if nextitem is None:
708 # if no item on parent-level, then no change...
711 # if no item on parent-level, then no change...
709 nextitem = currentitem
712 nextitem = currentitem
710 if not nextitem.folded:
713 if not nextitem.folded:
711 self.togglefolded(item=nextitem)
714 self.togglefolded(item=nextitem)
712
715
713 self.currentselecteditem = nextitem
716 self.currentselecteditem = nextitem
714
717
715 def leftarrowshiftevent(self):
718 def leftarrowshiftevent(self):
716 """
719 """
717 select the header of the current item (or fold current item if the
720 select the header of the current item (or fold current item if the
718 current item is already a header).
721 current item is already a header).
719
722
720 """
723 """
721 currentitem = self.currentselecteditem
724 currentitem = self.currentselecteditem
722
725
723 if isinstance(currentitem, uiheader):
726 if isinstance(currentitem, uiheader):
724 if not currentitem.folded:
727 if not currentitem.folded:
725 self.togglefolded(item=currentitem)
728 self.togglefolded(item=currentitem)
726 return
729 return
727
730
728 # select the parent item recursively until we're at a header
731 # select the parent item recursively until we're at a header
729 while True:
732 while True:
730 nextitem = currentitem.parentitem()
733 nextitem = currentitem.parentitem()
731 if nextitem is None:
734 if nextitem is None:
732 break
735 break
733 else:
736 else:
734 currentitem = nextitem
737 currentitem = nextitem
735
738
736 self.currentselecteditem = currentitem
739 self.currentselecteditem = currentitem
737
740
738 def updatescroll(self):
741 def updatescroll(self):
739 "scroll the screen to fully show the currently-selected"
742 "scroll the screen to fully show the currently-selected"
740 selstart = self.selecteditemstartline
743 selstart = self.selecteditemstartline
741 selend = self.selecteditemendline
744 selend = self.selecteditemendline
742 #selnumlines = selend - selstart
745 #selnumlines = selend - selstart
743 padstart = self.firstlineofpadtoprint
746 padstart = self.firstlineofpadtoprint
744 padend = padstart + self.yscreensize - self.numstatuslines - 1
747 padend = padstart + self.yscreensize - self.numstatuslines - 1
745 # 'buffered' pad start/end values which scroll with a certain
748 # 'buffered' pad start/end values which scroll with a certain
746 # top/bottom context margin
749 # top/bottom context margin
747 padstartbuffered = padstart + 3
750 padstartbuffered = padstart + 3
748 padendbuffered = padend - 3
751 padendbuffered = padend - 3
749
752
750 if selend > padendbuffered:
753 if selend > padendbuffered:
751 self.scrolllines(selend - padendbuffered)
754 self.scrolllines(selend - padendbuffered)
752 elif selstart < padstartbuffered:
755 elif selstart < padstartbuffered:
753 # negative values scroll in pgup direction
756 # negative values scroll in pgup direction
754 self.scrolllines(selstart - padstartbuffered)
757 self.scrolllines(selstart - padstartbuffered)
755
758
756
759
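A small numeric example of the three-line scroll margin used by updatescroll() above (all numbers made up):

padstart, visiblerows = 10, 30                    # pad line 10 is the top row
padend = padstart + visiblerows - 1               # 39
padstartbuffered, padendbuffered = padstart + 3, padend - 3   # 13, 36
selstart, selend = 38, 40                 # selection ends past the lower margin
if selend > padendbuffered:
    scrollby = selend - padendbuffered            # 4: scroll down four lines
elif selstart < padstartbuffered:
    scrollby = selstart - padstartbuffered        # negative: scroll up
else:
    scrollby = 0
print(scrollby)                                   # 4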
757 def scrolllines(self, numlines):
760 def scrolllines(self, numlines):
758 "scroll the screen up (down) by numlines when numlines >0 (<0)."
761 "scroll the screen up (down) by numlines when numlines >0 (<0)."
759 self.firstlineofpadtoprint += numlines
762 self.firstlineofpadtoprint += numlines
760 if self.firstlineofpadtoprint < 0:
763 if self.firstlineofpadtoprint < 0:
761 self.firstlineofpadtoprint = 0
764 self.firstlineofpadtoprint = 0
762 if self.firstlineofpadtoprint > self.numpadlines - 1:
765 if self.firstlineofpadtoprint > self.numpadlines - 1:
763 self.firstlineofpadtoprint = self.numpadlines - 1
766 self.firstlineofpadtoprint = self.numpadlines - 1
764
767
765 def toggleapply(self, item=None):
768 def toggleapply(self, item=None):
766 """
769 """
767 toggle the applied flag of the specified item. if no item is specified,
770 toggle the applied flag of the specified item. if no item is specified,
768 toggle the flag of the currently selected item.
771 toggle the flag of the currently selected item.
769
772
770 """
773 """
771 if item is None:
774 if item is None:
772 item = self.currentselecteditem
775 item = self.currentselecteditem
773
776
774 item.applied = not item.applied
777 item.applied = not item.applied
775
778
776 if isinstance(item, uiheader):
779 if isinstance(item, uiheader):
777 item.partial = False
780 item.partial = False
778 if item.applied:
781 if item.applied:
779 # apply all its hunks
782 # apply all its hunks
780 for hnk in item.hunks:
783 for hnk in item.hunks:
781 hnk.applied = True
784 hnk.applied = True
782 # apply all their hunklines
785 # apply all their hunklines
783 for hunkline in hnk.changedlines:
786 for hunkline in hnk.changedlines:
784 hunkline.applied = True
787 hunkline.applied = True
785 else:
788 else:
786 # un-apply all its hunks
789 # un-apply all its hunks
787 for hnk in item.hunks:
790 for hnk in item.hunks:
788 hnk.applied = False
791 hnk.applied = False
789 hnk.partial = False
792 hnk.partial = False
790 # un-apply all their hunklines
793 # un-apply all their hunklines
791 for hunkline in hnk.changedlines:
794 for hunkline in hnk.changedlines:
792 hunkline.applied = False
795 hunkline.applied = False
793 elif isinstance(item, uihunk):
796 elif isinstance(item, uihunk):
794 item.partial = False
797 item.partial = False
795 # apply all its hunklines
798 # apply all its hunklines
796 for hunkline in item.changedlines:
799 for hunkline in item.changedlines:
797 hunkline.applied = item.applied
800 hunkline.applied = item.applied
798
801
799 siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
802 siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
800 allsiblingsapplied = not (False in siblingappliedstatus)
803 allsiblingsapplied = not (False in siblingappliedstatus)
801 nosiblingsapplied = not (True in siblingappliedstatus)
804 nosiblingsapplied = not (True in siblingappliedstatus)
802
805
803 siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
806 siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
804 somesiblingspartial = (True in siblingspartialstatus)
807 somesiblingspartial = (True in siblingspartialstatus)
805
808
806 #cases where applied or partial should be removed from header
809 #cases where applied or partial should be removed from header
807
810
808 # if no 'sibling' hunks are applied (including this hunk)
811 # if no 'sibling' hunks are applied (including this hunk)
809 if nosiblingsapplied:
812 if nosiblingsapplied:
810 if not item.header.special():
813 if not item.header.special():
811 item.header.applied = False
814 item.header.applied = False
812 item.header.partial = False
815 item.header.partial = False
813 else: # some/all parent siblings are applied
816 else: # some/all parent siblings are applied
814 item.header.applied = True
817 item.header.applied = True
815 item.header.partial = (somesiblingspartial or
818 item.header.partial = (somesiblingspartial or
816 not allsiblingsapplied)
819 not allsiblingsapplied)
817
820
818 elif isinstance(item, uihunkline):
821 elif isinstance(item, uihunkline):
819 siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
822 siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
820 allsiblingsapplied = not (False in siblingappliedstatus)
823 allsiblingsapplied = not (False in siblingappliedstatus)
821 nosiblingsapplied = not (True in siblingappliedstatus)
824 nosiblingsapplied = not (True in siblingappliedstatus)
822
825
823 # if no 'sibling' lines are applied
826 # if no 'sibling' lines are applied
824 if nosiblingsapplied:
827 if nosiblingsapplied:
825 item.hunk.applied = False
828 item.hunk.applied = False
826 item.hunk.partial = False
829 item.hunk.partial = False
827 elif allsiblingsapplied:
830 elif allsiblingsapplied:
828 item.hunk.applied = True
831 item.hunk.applied = True
829 item.hunk.partial = False
832 item.hunk.partial = False
830 else: # some siblings applied
833 else: # some siblings applied
831 item.hunk.applied = True
834 item.hunk.applied = True
832 item.hunk.partial = True
835 item.hunk.partial = True
833
836
834 parentsiblingsapplied = [hnk.applied for hnk
837 parentsiblingsapplied = [hnk.applied for hnk
835 in item.hunk.header.hunks]
838 in item.hunk.header.hunks]
836 noparentsiblingsapplied = not (True in parentsiblingsapplied)
839 noparentsiblingsapplied = not (True in parentsiblingsapplied)
837 allparentsiblingsapplied = not (False in parentsiblingsapplied)
840 allparentsiblingsapplied = not (False in parentsiblingsapplied)
838
841
839 parentsiblingspartial = [hnk.partial for hnk
842 parentsiblingspartial = [hnk.partial for hnk
840 in item.hunk.header.hunks]
843 in item.hunk.header.hunks]
841 someparentsiblingspartial = (True in parentsiblingspartial)
844 someparentsiblingspartial = (True in parentsiblingspartial)
842
845
843 # if all parent hunks are not applied, un-apply header
846 # if all parent hunks are not applied, un-apply header
844 if noparentsiblingsapplied:
847 if noparentsiblingsapplied:
845 if not item.hunk.header.special():
848 if not item.hunk.header.special():
846 item.hunk.header.applied = False
849 item.hunk.header.applied = False
847 item.hunk.header.partial = False
850 item.hunk.header.partial = False
848 # set the applied and partial status of the header if needed
851 # set the applied and partial status of the header if needed
849 else: # some/all parent siblings are applied
852 else: # some/all parent siblings are applied
850 item.hunk.header.applied = True
853 item.hunk.header.applied = True
851 item.hunk.header.partial = (someparentsiblingspartial or
854 item.hunk.header.partial = (someparentsiblingspartial or
852 not allparentsiblingsapplied)
855 not allparentsiblingsapplied)
853
856
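The header bookkeeping in toggleapply() above boils down to two checks over the sibling hunks. A standalone sketch (illustration only; it ignores the header.special() exception handled above):

def headerflags(hunkapplied, hunkpartial):
    anyapplied = True in hunkapplied
    allapplied = False not in hunkapplied
    anypartial = True in hunkpartial
    if not anyapplied:
        return False, False                      # header unapplied, not partial
    return True, (anypartial or not allapplied)  # applied; partial if mixed

assert headerflags([True, False], [False, False]) == (True, True)
assert headerflags([True, True], [False, False]) == (True, False)
assert headerflags([False, False], [False, False]) == (False, False)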
854 def toggleall(self):
857 def toggleall(self):
855 "toggle the applied flag of all items."
858 "toggle the applied flag of all items."
856 if self.waslasttoggleallapplied: # then unapply them this time
859 if self.waslasttoggleallapplied: # then unapply them this time
857 for item in self.headerlist:
860 for item in self.headerlist:
858 if item.applied:
861 if item.applied:
859 self.toggleapply(item)
862 self.toggleapply(item)
860 else:
863 else:
861 for item in self.headerlist:
864 for item in self.headerlist:
862 if not item.applied:
865 if not item.applied:
863 self.toggleapply(item)
866 self.toggleapply(item)
864 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
867 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
865
868
866 def togglefolded(self, item=None, foldparent=False):
869 def togglefolded(self, item=None, foldparent=False):
867 "toggle folded flag of specified item (defaults to currently selected)"
870 "toggle folded flag of specified item (defaults to currently selected)"
868 if item is None:
871 if item is None:
869 item = self.currentselecteditem
872 item = self.currentselecteditem
870 if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
873 if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
871 if not isinstance(item, uiheader):
874 if not isinstance(item, uiheader):
872 # we need to select the parent item in this case
875 # we need to select the parent item in this case
873 self.currentselecteditem = item = item.parentitem()
876 self.currentselecteditem = item = item.parentitem()
874 elif item.neverunfolded:
877 elif item.neverunfolded:
875 item.neverunfolded = False
878 item.neverunfolded = False
876
879
877 # also fold any foldable children of the parent/current item
880 # also fold any foldable children of the parent/current item
878 if isinstance(item, uiheader): # the original or 'new' item
881 if isinstance(item, uiheader): # the original or 'new' item
879 for child in item.allchildren():
882 for child in item.allchildren():
880 child.folded = not item.folded
883 child.folded = not item.folded
881
884
882 if isinstance(item, (uiheader, uihunk)):
885 if isinstance(item, (uiheader, uihunk)):
883 item.folded = not item.folded
886 item.folded = not item.folded
884
887
885
888
886 def alignstring(self, instr, window):
889 def alignstring(self, instr, window):
887 """
890 """
888 add whitespace to the end of a string in order to make it fill
891 add whitespace to the end of a string in order to make it fill
889 the screen in the x direction. the current cursor position is
892 the screen in the x direction. the current cursor position is
890 taken into account when making this calculation. the string can span
893 taken into account when making this calculation. the string can span
891 multiple lines.
894 multiple lines.
892
895
893 """
896 """
894 y, xstart = window.getyx()
897 y, xstart = window.getyx()
895 width = self.xscreensize
898 width = self.xscreensize
896 # turn tabs into spaces
899 # turn tabs into spaces
897 instr = instr.expandtabs(4)
900 instr = instr.expandtabs(4)
898 strwidth = encoding.colwidth(instr)
901 strwidth = encoding.colwidth(instr)
899 numspaces = (width - ((strwidth + xstart) % width) - 1)
902 numspaces = (width - ((strwidth + xstart) % width) - 1)
900 return instr + " " * numspaces + "\n"
903 return instr + " " * numspaces + "\n"
901
904
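The padding arithmetic in alignstring() above, on plain values (illustration only): the string is padded out so its final line stops one cell short of the right edge.

width, xstart, strwidth = 80, 10, 25    # screen width, cursor column, text width
numspaces = width - ((strwidth + xstart) % width) - 1
assert numspaces == 44                  # 10 + 25 + 44 == 79, one cell spare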
902 def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
905 def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
903 pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
906 pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
904 """
907 """
905 print the string, text, with the specified colors and attributes, to
908 print the string, text, with the specified colors and attributes, to
906 the specified curses window object.
909 the specified curses window object.
907
910
908 the foreground and background colors are of the form
911 the foreground and background colors are of the form
909 curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
912 curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
910 magenta, red, white, yellow]. if pairname is provided, a color
913 magenta, red, white, yellow]. if pairname is provided, a color
911 pair will be looked up in the self.colorpairnames dictionary.
914 pair will be looked up in the self.colorpairnames dictionary.
912
915
913 attrlist is a list containing text attributes in the form of
916 attrlist is a list containing text attributes in the form of
914 curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
917 curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
915 underline].
918 underline].
916
919
917 if align == True, whitespace is added to the printed string such that
920 if align == True, whitespace is added to the printed string such that
918 the string stretches to the right border of the window.
921 the string stretches to the right border of the window.
919
922
920 if showwhtspc == True, trailing whitespace of a string is highlighted.
923 if showwhtspc == True, trailing whitespace of a string is highlighted.
921
924
922 """
925 """
923 # preprocess the text, converting tabs to spaces
926 # preprocess the text, converting tabs to spaces
924 text = text.expandtabs(4)
927 text = text.expandtabs(4)
925 # strip \n, and convert control characters to ^[char] representation
928 # strip \n, and convert control characters to ^[char] representation
926 text = re.sub(r'[\x00-\x08\x0a-\x1f]',
929 text = re.sub(r'[\x00-\x08\x0a-\x1f]',
927 lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
930 lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
928
931
929 if pair is not None:
932 if pair is not None:
930 colorpair = pair
933 colorpair = pair
931 elif pairname is not None:
934 elif pairname is not None:
932 colorpair = self.colorpairnames[pairname]
935 colorpair = self.colorpairnames[pairname]
933 else:
936 else:
934 if fgcolor is None:
937 if fgcolor is None:
935 fgcolor = -1
938 fgcolor = -1
936 if bgcolor is None:
939 if bgcolor is None:
937 bgcolor = -1
940 bgcolor = -1
938 if (fgcolor, bgcolor) in self.colorpairs:
941 if (fgcolor, bgcolor) in self.colorpairs:
939 colorpair = self.colorpairs[(fgcolor, bgcolor)]
942 colorpair = self.colorpairs[(fgcolor, bgcolor)]
940 else:
943 else:
941 colorpair = self.getcolorpair(fgcolor, bgcolor)
944 colorpair = self.getcolorpair(fgcolor, bgcolor)
942 # add attributes if possible
945 # add attributes if possible
943 if attrlist is None:
946 if attrlist is None:
944 attrlist = []
947 attrlist = []
945 if colorpair < 256:
948 if colorpair < 256:
946 # then it is safe to apply all attributes
949 # then it is safe to apply all attributes
947 for textattr in attrlist:
950 for textattr in attrlist:
948 colorpair |= textattr
951 colorpair |= textattr
949 else:
952 else:
950 # just apply a select few (safe?) attributes
953 # just apply a select few (safe?) attributes
951 for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
954 for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
952 if textattr in attrlist:
955 if textattr in attrlist:
953 colorpair |= textattr
956 colorpair |= textattr
954
957
955 y, xstart = self.chunkpad.getyx()
958 y, xstart = self.chunkpad.getyx()
956 t = "" # variable for counting lines printed
959 t = "" # variable for counting lines printed
957 # if requested, show trailing whitespace
960 # if requested, show trailing whitespace
958 if showwhtspc:
961 if showwhtspc:
959 origlen = len(text)
962 origlen = len(text)
960 text = text.rstrip(' \n') # tabs have already been expanded
963 text = text.rstrip(' \n') # tabs have already been expanded
961 strippedlen = len(text)
964 strippedlen = len(text)
962 numtrailingspaces = origlen - strippedlen
965 numtrailingspaces = origlen - strippedlen
963
966
964 if towin:
967 if towin:
965 window.addstr(text, colorpair)
968 window.addstr(text, colorpair)
966 t += text
969 t += text
967
970
968 if showwhtspc:
971 if showwhtspc:
969 wscolorpair = colorpair | curses.A_REVERSE
972 wscolorpair = colorpair | curses.A_REVERSE
970 if towin:
973 if towin:
971 for i in range(numtrailingspaces):
974 for i in range(numtrailingspaces):
972 window.addch(curses.ACS_CKBOARD, wscolorpair)
975 window.addch(curses.ACS_CKBOARD, wscolorpair)
973 t += " " * numtrailingspaces
976 t += " " * numtrailingspaces
974
977
975 if align:
978 if align:
976 if towin:
979 if towin:
977 extrawhitespace = self.alignstring("", window)
980 extrawhitespace = self.alignstring("", window)
978 window.addstr(extrawhitespace, colorpair)
981 window.addstr(extrawhitespace, colorpair)
979 else:
982 else:
980 # need to use t, since the x position hasn't incremented
983 # need to use t, since the x position hasn't incremented
981 extrawhitespace = self.alignstring(t, window)
984 extrawhitespace = self.alignstring(t, window)
982 t += extrawhitespace
985 t += extrawhitespace
983
986
984 # self.linesprintedtopadsofar is reset to 0 at the beginning of printitem()
987 # self.linesprintedtopadsofar is reset to 0 at the beginning of printitem()
985
988
986 linesprinted = (xstart + len(t)) / self.xscreensize
989 linesprinted = (xstart + len(t)) / self.xscreensize
987 self.linesprintedtopadsofar += linesprinted
990 self.linesprintedtopadsofar += linesprinted
988 return t
991 return t
989
992
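The trailing-whitespace handling in printstring() above splits the text into a body and a count of trailing blanks, and later repaints those blanks in reverse video. A standalone, illustrative restatement of that split (not part of crecord.py; the helper name is invented):

    def splittrailing(text):
        # tabs are assumed to have been expanded already, as in printstring()
        stripped = text.rstrip(' \n')
        return stripped, len(text) - len(stripped)

    assert splittrailing('foo   ') == ('foo', 3)
    assert splittrailing('no trailing blanks') == ('no trailing blanks', 0)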
990 def updatescreen(self):
993 def updatescreen(self):
991 self.statuswin.erase()
994 self.statuswin.erase()
992 self.chunkpad.erase()
995 self.chunkpad.erase()
993
996
994 printstring = self.printstring
997 printstring = self.printstring
995
998
996 # print out the status lines at the top
999 # print out the status lines at the top
997 try:
1000 try:
998 if self.errorstr is not None:
1001 if self.errorstr is not None:
999 printstring(self.statuswin, self.errorstr, pairname='legend')
1002 printstring(self.statuswin, self.errorstr, pairname='legend')
1000 printstring(self.statuswin, 'Press any key to continue',
1003 printstring(self.statuswin, 'Press any key to continue',
1001 pairname='legend')
1004 pairname='legend')
1002 self.statuswin.refresh()
1005 self.statuswin.refresh()
1003 return
1006 return
1004 printstring(self.statuswin,
1007 printstring(self.statuswin,
1005 "SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
1008 "SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
1006 "(space/A) toggle hunk/all; (e)dit hunk;",
1009 "(space/A) toggle hunk/all; (e)dit hunk;",
1007 pairname="legend")
1010 pairname="legend")
1008 printstring(self.statuswin,
1011 printstring(self.statuswin,
1009 " (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
1012 " (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
1010 "| [X]=hunk applied **=folded",
1013 "| [X]=hunk applied **=folded",
1011 pairname="legend")
1014 pairname="legend")
1012 except curses.error:
1015 except curses.error:
1013 pass
1016 pass
1014
1017
1015 # print out the patch in the remaining part of the window
1018 # print out the patch in the remaining part of the window
1016 try:
1019 try:
1017 self.printitem()
1020 self.printitem()
1018 self.updatescroll()
1021 self.updatescroll()
1019 self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
1022 self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
1020 self.numstatuslines, 0,
1023 self.numstatuslines, 0,
1021 self.yscreensize + 1 - self.numstatuslines,
1024 self.yscreensize + 1 - self.numstatuslines,
1022 self.xscreensize)
1025 self.xscreensize)
1023 except curses.error:
1026 except curses.error:
1024 pass
1027 pass
1025
1028
1026 # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
1029 # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
1027 self.statuswin.refresh()
1030 self.statuswin.refresh()
1028
1031
1029 def getstatusprefixstring(self, item):
1032 def getstatusprefixstring(self, item):
1030 """
1033 """
1031 create a string to prefix a line with which indicates whether 'item'
1034 create a string to prefix a line with which indicates whether 'item'
1032 is applied and/or folded.
1035 is applied and/or folded.
1033
1036
1034 """
1037 """
1035 # create checkbox string
1038 # create checkbox string
1036 if item.applied:
1039 if item.applied:
1037 if not isinstance(item, uihunkline) and item.partial:
1040 if not isinstance(item, uihunkline) and item.partial:
1038 checkbox = "[~]"
1041 checkbox = "[~]"
1039 else:
1042 else:
1040 checkbox = "[x]"
1043 checkbox = "[x]"
1041 else:
1044 else:
1042 checkbox = "[ ]"
1045 checkbox = "[ ]"
1043
1046
1044 try:
1047 try:
1045 if item.folded:
1048 if item.folded:
1046 checkbox += "**"
1049 checkbox += "**"
1047 if isinstance(item, uiheader):
1050 if isinstance(item, uiheader):
1048 # one of "m", "a", or "d" (modified, added, deleted)
1051 # one of "m", "a", or "d" (modified, added, deleted)
1049 filestatus = item.changetype
1052 filestatus = item.changetype
1050
1053
1051 checkbox += filestatus + " "
1054 checkbox += filestatus + " "
1052 else:
1055 else:
1053 checkbox += " "
1056 checkbox += " "
1054 if isinstance(item, uiheader):
1057 if isinstance(item, uiheader):
1055 # add two more spaces for headers
1058 # add two more spaces for headers
1056 checkbox += " "
1059 checkbox += " "
1057 except AttributeError: # not foldable
1060 except AttributeError: # not foldable
1058 checkbox += " "
1061 checkbox += " "
1059
1062
1060 return checkbox
1063 return checkbox
1061
1064
1062 def printheader(self, header, selected=False, towin=True,
1065 def printheader(self, header, selected=False, towin=True,
1063 ignorefolding=False):
1066 ignorefolding=False):
1064 """
1067 """
1065 print the header to the pad. if towin is False, don't print
1068 print the header to the pad. if towin is False, don't print
1066 anything, but just count the number of lines which would be printed.
1069 anything, but just count the number of lines which would be printed.
1067
1070
1068 """
1071 """
1069 outstr = ""
1072 outstr = ""
1070 text = header.prettystr()
1073 text = header.prettystr()
1071 chunkindex = self.chunklist.index(header)
1074 chunkindex = self.chunklist.index(header)
1072
1075
1073 if chunkindex != 0 and not header.folded:
1076 if chunkindex != 0 and not header.folded:
1074 # add separating line before headers
1077 # add separating line before headers
1075 outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
1078 outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
1076 towin=towin, align=False)
1079 towin=towin, align=False)
1077 # select color-pair based on if the header is selected
1080 # select color-pair based on if the header is selected
1078 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1081 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1079 attrlist=[curses.A_BOLD])
1082 attrlist=[curses.A_BOLD])
1080
1083
1081 # print out each line of the chunk, expanding it to screen width
1084 # print out each line of the chunk, expanding it to screen width
1082
1085
1083 # number of characters to indent lines on this level by
1086 # number of characters to indent lines on this level by
1084 indentnumchars = 0
1087 indentnumchars = 0
1085 checkbox = self.getstatusprefixstring(header)
1088 checkbox = self.getstatusprefixstring(header)
1086 if not header.folded or ignorefolding:
1089 if not header.folded or ignorefolding:
1087 textlist = text.split("\n")
1090 textlist = text.split("\n")
1088 linestr = checkbox + textlist[0]
1091 linestr = checkbox + textlist[0]
1089 else:
1092 else:
1090 linestr = checkbox + header.filename()
1093 linestr = checkbox + header.filename()
1091 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1094 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1092 towin=towin)
1095 towin=towin)
1093 if not header.folded or ignorefolding:
1096 if not header.folded or ignorefolding:
1094 if len(textlist) > 1:
1097 if len(textlist) > 1:
1095 for line in textlist[1:]:
1098 for line in textlist[1:]:
1096 linestr = " "*(indentnumchars + len(checkbox)) + line
1099 linestr = " "*(indentnumchars + len(checkbox)) + line
1097 outstr += self.printstring(self.chunkpad, linestr,
1100 outstr += self.printstring(self.chunkpad, linestr,
1098 pair=colorpair, towin=towin)
1101 pair=colorpair, towin=towin)
1099
1102
1100 return outstr
1103 return outstr
1101
1104
1102 def printhunklinesbefore(self, hunk, selected=False, towin=True,
1105 def printhunklinesbefore(self, hunk, selected=False, towin=True,
1103 ignorefolding=False):
1106 ignorefolding=False):
1104 "includes start/end line indicator"
1107 "includes start/end line indicator"
1105 outstr = ""
1108 outstr = ""
1106 # where hunk is in list of siblings
1109 # where hunk is in list of siblings
1107 hunkindex = hunk.header.hunks.index(hunk)
1110 hunkindex = hunk.header.hunks.index(hunk)
1108
1111
1109 if hunkindex != 0:
1112 if hunkindex != 0:
1110 # add separating line before headers
1113 # add separating line before headers
1111 outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
1114 outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
1112 towin=towin, align=False)
1115 towin=towin, align=False)
1113
1116
1114 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1117 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1115 attrlist=[curses.A_BOLD])
1118 attrlist=[curses.A_BOLD])
1116
1119
1117 # print out from-to line with checkbox
1120 # print out from-to line with checkbox
1118 checkbox = self.getstatusprefixstring(hunk)
1121 checkbox = self.getstatusprefixstring(hunk)
1119
1122
1120 lineprefix = " "*self.hunkindentnumchars + checkbox
1123 lineprefix = " "*self.hunkindentnumchars + checkbox
1121 frtoline = " " + hunk.getfromtoline().strip("\n")
1124 frtoline = " " + hunk.getfromtoline().strip("\n")
1122
1125
1123
1126
1124 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1127 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1125 align=False) # add uncolored checkbox/indent
1128 align=False) # add uncolored checkbox/indent
1126 outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
1129 outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
1127 towin=towin)
1130 towin=towin)
1128
1131
1129 if hunk.folded and not ignorefolding:
1132 if hunk.folded and not ignorefolding:
1130 # skip remainder of output
1133 # skip remainder of output
1131 return outstr
1134 return outstr
1132
1135
1133 # print out lines of the chunk preceding changed-lines
1136 # print out lines of the chunk preceding changed-lines
1134 for line in hunk.before:
1137 for line in hunk.before:
1135 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1138 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1136 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1139 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1137
1140
1138 return outstr
1141 return outstr
1139
1142
1140 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1143 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1141 outstr = ""
1144 outstr = ""
1142 if hunk.folded and not ignorefolding:
1145 if hunk.folded and not ignorefolding:
1143 return outstr
1146 return outstr
1144
1147
1145 # a bit superfluous, but to avoid hard-coding indent amount
1148 # a bit superfluous, but to avoid hard-coding indent amount
1146 checkbox = self.getstatusprefixstring(hunk)
1149 checkbox = self.getstatusprefixstring(hunk)
1147 for line in hunk.after:
1150 for line in hunk.after:
1148 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1151 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1149 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1152 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1150
1153
1151 return outstr
1154 return outstr
1152
1155
1153 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1156 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1154 outstr = ""
1157 outstr = ""
1155 checkbox = self.getstatusprefixstring(hunkline)
1158 checkbox = self.getstatusprefixstring(hunkline)
1156
1159
1157 linestr = hunkline.prettystr().strip("\n")
1160 linestr = hunkline.prettystr().strip("\n")
1158
1161
1159 # select color-pair based on whether line is an addition/removal
1162 # select color-pair based on whether line is an addition/removal
1160 if selected:
1163 if selected:
1161 colorpair = self.getcolorpair(name="selected")
1164 colorpair = self.getcolorpair(name="selected")
1162 elif linestr.startswith("+"):
1165 elif linestr.startswith("+"):
1163 colorpair = self.getcolorpair(name="addition")
1166 colorpair = self.getcolorpair(name="addition")
1164 elif linestr.startswith("-"):
1167 elif linestr.startswith("-"):
1165 colorpair = self.getcolorpair(name="deletion")
1168 colorpair = self.getcolorpair(name="deletion")
1166 elif linestr.startswith("\\"):
1169 elif linestr.startswith("\\"):
1167 colorpair = self.getcolorpair(name="normal")
1170 colorpair = self.getcolorpair(name="normal")
1168
1171
1169 lineprefix = " "*self.hunklineindentnumchars + checkbox
1172 lineprefix = " "*self.hunklineindentnumchars + checkbox
1170 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1173 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1171 align=False) # add uncolored checkbox/indent
1174 align=False) # add uncolored checkbox/indent
1172 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1175 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1173 towin=towin, showwhtspc=True)
1176 towin=towin, showwhtspc=True)
1174 return outstr
1177 return outstr
1175
1178
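printhunkchangedline() picks the colour pair from the first character of the line, with selection taking precedence. A minimal standalone sketch of that mapping (the string aliases are the ones registered via initcolorpair() in main(); the helper itself is invented for illustration):

    def colorname(line, selected=False):
        # mirrors the branches above: selection wins, then +/-, then everything else
        if selected:
            return 'selected'
        if line.startswith('+'):
            return 'addition'
        if line.startswith('-'):
            return 'deletion'
        return 'normal'

    assert colorname('+new line') == 'addition'
    assert colorname('-old line', selected=True) == 'selected'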
1176 def printitem(self, item=None, ignorefolding=False, recursechildren=True,
1179 def printitem(self, item=None, ignorefolding=False, recursechildren=True,
1177 towin=True):
1180 towin=True):
1178 """
1181 """
1179 use __printitem() to print the specified item.
1182 use __printitem() to print the specified item.
1180 if item is not specified, then print the entire patch.
1183 if item is not specified, then print the entire patch.
1181 (hiding folded elements, etc. -- see __printitem() docstring)
1184 (hiding folded elements, etc. -- see __printitem() docstring)
1182 """
1185 """
1183 if item is None:
1186 if item is None:
1184 item = self.headerlist
1187 item = self.headerlist
1185 if recursechildren:
1188 if recursechildren:
1186 self.linesprintedtopadsofar = 0
1189 self.linesprintedtopadsofar = 0
1187
1190
1188 outstr = []
1191 outstr = []
1189 self.__printitem(item, ignorefolding, recursechildren, outstr,
1192 self.__printitem(item, ignorefolding, recursechildren, outstr,
1190 towin=towin)
1193 towin=towin)
1191 return ''.join(outstr)
1194 return ''.join(outstr)
1192
1195
1193 def outofdisplayedarea(self):
1196 def outofdisplayedarea(self):
1194 y, _ = self.chunkpad.getyx() # cursor location
1197 y, _ = self.chunkpad.getyx() # cursor location
1195 # * 2 here works but an optimization would be the max number of
1198 # * 2 here works but an optimization would be the max number of
1196 # consecutive non-selectable lines
1199 # consecutive non-selectable lines
1197 # i.e. the max number of context lines for any hunk in the patch
1200 # i.e. the max number of context lines for any hunk in the patch
1198 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1201 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1199 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1202 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1200 return y < miny or y > maxy
1203 return y < miny or y > maxy
1201
1204
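The bounds check above treats anything more than one screen above, or more than two screens below, the first printed pad line as off-screen, so __printitem() can stop drawing early. A hedged standalone restatement with the object attributes passed in as plain arguments:

    def outofdisplayedarea(y, firstlineofpadtoprint, yscreensize):
        # same arithmetic as the method above, just without the object state
        miny = min(0, firstlineofpadtoprint - yscreensize)
        maxy = firstlineofpadtoprint + yscreensize * 2
        return y < miny or y > maxy

    assert not outofdisplayedarea(y=10, firstlineofpadtoprint=0, yscreensize=40)
    assert outofdisplayedarea(y=200, firstlineofpadtoprint=0, yscreensize=40)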
1202 def handleselection(self, item, recursechildren):
1205 def handleselection(self, item, recursechildren):
1203 selected = (item is self.currentselecteditem)
1206 selected = (item is self.currentselecteditem)
1204 if selected and recursechildren:
1207 if selected and recursechildren:
1205 # assumes line numbering starting from line 0
1208 # assumes line numbering starting from line 0
1206 self.selecteditemstartline = self.linesprintedtopadsofar
1209 self.selecteditemstartline = self.linesprintedtopadsofar
1207 selecteditemlines = self.getnumlinesdisplayed(item,
1210 selecteditemlines = self.getnumlinesdisplayed(item,
1208 recursechildren=False)
1211 recursechildren=False)
1209 self.selecteditemendline = (self.selecteditemstartline +
1212 self.selecteditemendline = (self.selecteditemstartline +
1210 selecteditemlines - 1)
1213 selecteditemlines - 1)
1211 return selected
1214 return selected
1212
1215
1213 def __printitem(self, item, ignorefolding, recursechildren, outstr,
1216 def __printitem(self, item, ignorefolding, recursechildren, outstr,
1214 towin=True):
1217 towin=True):
1215 """
1218 """
1216 recursive method for printing out patch/header/hunk/hunk-line data to
1219 recursive method for printing out patch/header/hunk/hunk-line data to
1217 screen. also returns a string with all of the content of the displayed
1220 screen. also returns a string with all of the content of the displayed
1218 patch (not including coloring, etc.).
1221 patch (not including coloring, etc.).
1219
1222
1220 if ignorefolding is True, then folded items are printed out.
1223 if ignorefolding is True, then folded items are printed out.
1221
1224
1222 if recursechildren is False, then only print the item without its
1225 if recursechildren is False, then only print the item without its
1223 child items.
1226 child items.
1224
1227
1225 """
1228 """
1226 if towin and self.outofdisplayedarea():
1229 if towin and self.outofdisplayedarea():
1227 return
1230 return
1228
1231
1229 selected = self.handleselection(item, recursechildren)
1232 selected = self.handleselection(item, recursechildren)
1230
1233
1231 # patch object is a list of headers
1234 # patch object is a list of headers
1232 if isinstance(item, patch):
1235 if isinstance(item, patch):
1233 if recursechildren:
1236 if recursechildren:
1234 for hdr in item:
1237 for hdr in item:
1235 self.__printitem(hdr, ignorefolding,
1238 self.__printitem(hdr, ignorefolding,
1236 recursechildren, outstr, towin)
1239 recursechildren, outstr, towin)
1237 # todo: eliminate all isinstance() calls
1240 # todo: eliminate all isinstance() calls
1238 if isinstance(item, uiheader):
1241 if isinstance(item, uiheader):
1239 outstr.append(self.printheader(item, selected, towin=towin,
1242 outstr.append(self.printheader(item, selected, towin=towin,
1240 ignorefolding=ignorefolding))
1243 ignorefolding=ignorefolding))
1241 if recursechildren:
1244 if recursechildren:
1242 for hnk in item.hunks:
1245 for hnk in item.hunks:
1243 self.__printitem(hnk, ignorefolding,
1246 self.__printitem(hnk, ignorefolding,
1244 recursechildren, outstr, towin)
1247 recursechildren, outstr, towin)
1245 elif (isinstance(item, uihunk) and
1248 elif (isinstance(item, uihunk) and
1246 ((not item.header.folded) or ignorefolding)):
1249 ((not item.header.folded) or ignorefolding)):
1247 # print the hunk data which comes before the changed-lines
1250 # print the hunk data which comes before the changed-lines
1248 outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
1251 outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
1249 ignorefolding=ignorefolding))
1252 ignorefolding=ignorefolding))
1250 if recursechildren:
1253 if recursechildren:
1251 for l in item.changedlines:
1254 for l in item.changedlines:
1252 self.__printitem(l, ignorefolding,
1255 self.__printitem(l, ignorefolding,
1253 recursechildren, outstr, towin)
1256 recursechildren, outstr, towin)
1254 outstr.append(self.printhunklinesafter(item, towin=towin,
1257 outstr.append(self.printhunklinesafter(item, towin=towin,
1255 ignorefolding=ignorefolding))
1258 ignorefolding=ignorefolding))
1256 elif (isinstance(item, uihunkline) and
1259 elif (isinstance(item, uihunkline) and
1257 ((not item.hunk.folded) or ignorefolding)):
1260 ((not item.hunk.folded) or ignorefolding)):
1258 outstr.append(self.printhunkchangedline(item, selected,
1261 outstr.append(self.printhunkchangedline(item, selected,
1259 towin=towin))
1262 towin=towin))
1260
1263
1261 return outstr
1264 return outstr
1262
1265
1263 def getnumlinesdisplayed(self, item=None, ignorefolding=False,
1266 def getnumlinesdisplayed(self, item=None, ignorefolding=False,
1264 recursechildren=True):
1267 recursechildren=True):
1265 """
1268 """
1266 return the number of lines which would be displayed if the item were
1269 return the number of lines which would be displayed if the item were
1267 to be printed to the display. the item will not be printed to the
1270 to be printed to the display. the item will not be printed to the
1268 display (pad).
1271 display (pad).
1269 if no item is given, assume the entire patch.
1272 if no item is given, assume the entire patch.
1270 if ignorefolding is True, folded items will be unfolded when counting
1273 if ignorefolding is True, folded items will be unfolded when counting
1271 the number of lines.
1274 the number of lines.
1272
1275
1273 """
1276 """
1274 # temporarily disable printing to windows by printstring
1277 # temporarily disable printing to windows by printstring
1275 patchdisplaystring = self.printitem(item, ignorefolding,
1278 patchdisplaystring = self.printitem(item, ignorefolding,
1276 recursechildren, towin=False)
1279 recursechildren, towin=False)
1277 numlines = len(patchdisplaystring) / self.xscreensize
1280 numlines = len(patchdisplaystring) / self.xscreensize
1278 return numlines
1281 return numlines
1279
1282
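getnumlinesdisplayed() re-renders the item with towin=False and derives the pad height from the length of the returned string, because every printed row is padded out to the full screen width by alignstring(). A minimal illustration of that arithmetic (standalone; the function name is invented):

    def padlinesneeded(renderedtext, xscreensize):
        # each displayed row contributes exactly xscreensize characters
        return len(renderedtext) // xscreensize

    # e.g. three rows on an 80-column screen:
    assert padlinesneeded('x' * 240, 80) == 3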
1280 def sigwinchhandler(self, n, frame):
1283 def sigwinchhandler(self, n, frame):
1281 "handle window resizing"
1284 "handle window resizing"
1282 try:
1285 try:
1283 curses.endwin()
1286 curses.endwin()
1284 self.yscreensize, self.xscreensize = gethw()
1287 self.yscreensize, self.xscreensize = gethw()
1285 self.statuswin.resize(self.numstatuslines, self.xscreensize)
1288 self.statuswin.resize(self.numstatuslines, self.xscreensize)
1286 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1289 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1287 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1290 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1288 # todo: try to resize commit message window if possible
1291 # todo: try to resize commit message window if possible
1289 except curses.error:
1292 except curses.error:
1290 pass
1293 pass
1291
1294
1292 def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
1295 def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
1293 attrlist=None):
1296 attrlist=None):
1294 """
1297 """
1295 get a curses color pair, adding it to self.colorpairs if it is not
1298 get a curses color pair, adding it to self.colorpairs if it is not
1296 already defined. an optional string, name, can be passed as a shortcut
1299 already defined. an optional string, name, can be passed as a shortcut
1297 for referring to the color-pair. by default, if no arguments are
1300 for referring to the color-pair. by default, if no arguments are
1298 specified, the white foreground / black background color-pair is
1301 specified, the white foreground / black background color-pair is
1299 returned.
1302 returned.
1300
1303
1301 it is expected that this function will be used exclusively for
1304 it is expected that this function will be used exclusively for
1302 initializing color pairs, rather than calling curses.init_pair() directly.
1305 initializing color pairs, rather than calling curses.init_pair() directly.
1303
1306
1304 attrlist is used to 'flavor' the returned color-pair. this information
1307 attrlist is used to 'flavor' the returned color-pair. this information
1305 is not stored in self.colorpairs. it contains attribute values like
1308 is not stored in self.colorpairs. it contains attribute values like
1306 curses.A_BOLD.
1309 curses.A_BOLD.
1307
1310
1308 """
1311 """
1309 if (name is not None) and name in self.colorpairnames:
1312 if (name is not None) and name in self.colorpairnames:
1310 # then get the associated color pair and return it
1313 # then get the associated color pair and return it
1311 colorpair = self.colorpairnames[name]
1314 colorpair = self.colorpairnames[name]
1312 else:
1315 else:
1313 if fgcolor is None:
1316 if fgcolor is None:
1314 fgcolor = -1
1317 fgcolor = -1
1315 if bgcolor is None:
1318 if bgcolor is None:
1316 bgcolor = -1
1319 bgcolor = -1
1317 if (fgcolor, bgcolor) in self.colorpairs:
1320 if (fgcolor, bgcolor) in self.colorpairs:
1318 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1321 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1319 else:
1322 else:
1320 pairindex = len(self.colorpairs) + 1
1323 pairindex = len(self.colorpairs) + 1
1321 curses.init_pair(pairindex, fgcolor, bgcolor)
1324 curses.init_pair(pairindex, fgcolor, bgcolor)
1322 colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
1325 colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
1323 curses.color_pair(pairindex))
1326 curses.color_pair(pairindex))
1324 if name is not None:
1327 if name is not None:
1325 self.colorpairnames[name] = curses.color_pair(pairindex)
1328 self.colorpairnames[name] = curses.color_pair(pairindex)
1326
1329
1327 # add attributes if possible
1330 # add attributes if possible
1328 if attrlist is None:
1331 if attrlist is None:
1329 attrlist = []
1332 attrlist = []
1330 if colorpair < 256:
1333 if colorpair < 256:
1331 # then it is safe to apply all attributes
1334 # then it is safe to apply all attributes
1332 for textattr in attrlist:
1335 for textattr in attrlist:
1333 colorpair |= textattr
1336 colorpair |= textattr
1334 else:
1337 else:
1335 # just apply a select few (safe?) attributes
1338 # just apply a select few (safe?) attributes
1336 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1339 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1337 if textattrib in attrlist:
1340 if textattrib in attrlist:
1338 colorpair |= textattrib
1341 colorpair |= textattrib
1339 return colorpair
1342 return colorpair
1340
1343
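getcolorpair() allocates one curses pair per distinct (fgcolor, bgcolor) combination and optionally records a string alias for it, so later callers can ask for the pair by name. The caching pattern, stripped of curses so it runs anywhere, looks roughly like this (illustrative only; the class and method names are invented):

    class colorpaircache(object):
        def __init__(self):
            self.colorpairs = {}      # (fg, bg) -> pair id
            self.colorpairnames = {}  # alias -> pair id

        def get(self, fgcolor=-1, bgcolor=-1, name=None):
            if name is not None and name in self.colorpairnames:
                return self.colorpairnames[name]
            if (fgcolor, bgcolor) not in self.colorpairs:
                # in crecord this is the point where curses.init_pair() is called
                self.colorpairs[(fgcolor, bgcolor)] = len(self.colorpairs) + 1
            pair = self.colorpairs[(fgcolor, bgcolor)]
            if name is not None:
                self.colorpairnames[name] = pair
            return pair

    cache = colorpaircache()
    assert cache.get(2, -1, name="addition") == cache.get(name="addition")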
1341 def initcolorpair(self, *args, **kwargs):
1344 def initcolorpair(self, *args, **kwargs):
1342 "same as getcolorpair."
1345 "same as getcolorpair."
1343 self.getcolorpair(*args, **kwargs)
1346 self.getcolorpair(*args, **kwargs)
1344
1347
1345 def helpwindow(self):
1348 def helpwindow(self):
1346 "print a help window to the screen. exit after any keypress."
1349 "print a help window to the screen. exit after any keypress."
1347 helptext = """ [press any key to return to the patch-display]
1350 helptext = """ [press any key to return to the patch-display]
1348
1351
1349 crecord allows you to interactively choose among the changes you have made,
1352 crecord allows you to interactively choose among the changes you have made,
1350 and confirm only those changes you select for further processing by the command
1353 and confirm only those changes you select for further processing by the command
1351 you are running (commit/shelve/revert). after confirming the selected
1354 you are running (commit/shelve/revert). after confirming the selected
1352 changes, the unselected changes are still present in your working copy, so you
1355 changes, the unselected changes are still present in your working copy, so you
1353 can use crecord multiple times to split large changes into smaller changesets.
1356 can use crecord multiple times to split large changes into smaller changesets.
1354 the following are valid keystrokes:
1357 the following are valid keystrokes:
1355
1358
1356 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1359 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1357 a : (un-)select all items
1360 a : (un-)select all items
1358 up/down-arrow [k/j] : go to previous/next unfolded item
1361 up/down-arrow [k/j] : go to previous/next unfolded item
1359 pgup/pgdn [K/J] : go to previous/next item of same type
1362 pgup/pgdn [K/J] : go to previous/next item of same type
1360 right/left-arrow [l/h] : go to child item / parent item
1363 right/left-arrow [l/h] : go to child item / parent item
1361 shift-left-arrow [H] : go to parent header / fold selected header
1364 shift-left-arrow [H] : go to parent header / fold selected header
1362 f : fold / unfold item, hiding/revealing its children
1365 f : fold / unfold item, hiding/revealing its children
1363 F : fold / unfold parent item and all of its ancestors
1366 F : fold / unfold parent item and all of its ancestors
1364 m : edit / resume editing the commit message
1367 m : edit / resume editing the commit message
1365 e : edit the currently selected hunk
1368 e : edit the currently selected hunk
1366 a : toggle amend mode (hg rev >= 2.2)
1369 a : toggle amend mode (hg rev >= 2.2)
1367 c : confirm selected changes
1370 c : confirm selected changes
1368 r : review/edit and confirm selected changes
1371 r : review/edit and confirm selected changes
1369 q : quit without confirming (no changes will be made)
1372 q : quit without confirming (no changes will be made)
1370 ? : help (what you're currently reading)"""
1373 ? : help (what you're currently reading)"""
1371
1374
1372 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1375 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1373 helplines = helptext.split("\n")
1376 helplines = helptext.split("\n")
1374 helplines = helplines + [" "]*(
1377 helplines = helplines + [" "]*(
1375 self.yscreensize - self.numstatuslines - len(helplines) - 1)
1378 self.yscreensize - self.numstatuslines - len(helplines) - 1)
1376 try:
1379 try:
1377 for line in helplines:
1380 for line in helplines:
1378 self.printstring(helpwin, line, pairname="legend")
1381 self.printstring(helpwin, line, pairname="legend")
1379 except curses.error:
1382 except curses.error:
1380 pass
1383 pass
1381 helpwin.refresh()
1384 helpwin.refresh()
1382 try:
1385 try:
1383 helpwin.getkey()
1386 helpwin.getkey()
1384 except curses.error:
1387 except curses.error:
1385 pass
1388 pass
1386
1389
1387 def confirmationwindow(self, windowtext):
1390 def confirmationwindow(self, windowtext):
1388 "display an informational window, then wait for and return a keypress."
1391 "display an informational window, then wait for and return a keypress."
1389
1392
1390 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1393 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1391 try:
1394 try:
1392 lines = windowtext.split("\n")
1395 lines = windowtext.split("\n")
1393 for line in lines:
1396 for line in lines:
1394 self.printstring(confirmwin, line, pairname="selected")
1397 self.printstring(confirmwin, line, pairname="selected")
1395 except curses.error:
1398 except curses.error:
1396 pass
1399 pass
1397 self.stdscr.refresh()
1400 self.stdscr.refresh()
1398 confirmwin.refresh()
1401 confirmwin.refresh()
1399 try:
1402 try:
1400 response = chr(self.stdscr.getch())
1403 response = chr(self.stdscr.getch())
1401 except ValueError:
1404 except ValueError:
1402 response = None
1405 response = None
1403
1406
1404 return response
1407 return response
1405
1408
1406 def confirmcommit(self, review=False):
1409 def confirmcommit(self, review=False):
1407 """ask for 'y' to be pressed to confirm selected. return True if
1410 """ask for 'y' to be pressed to confirm selected. return True if
1408 confirmed."""
1411 confirmed."""
1409 if review:
1412 if review:
1410 confirmtext = (
1413 confirmtext = (
1411 """if you answer yes to the following, the your currently chosen patch chunks
1414 """if you answer yes to the following, the your currently chosen patch chunks
1412 will be loaded into an editor. you may modify the patch from the editor, and
1415 will be loaded into an editor. you may modify the patch from the editor, and
1413 save the changes if you wish to change the patch. otherwise, you can just
1416 save the changes if you wish to change the patch. otherwise, you can just
1414 close the editor without saving to accept the current patch as-is.
1417 close the editor without saving to accept the current patch as-is.
1415
1418
1416 note: don't add/remove lines unless you also modify the range information.
1419 note: don't add/remove lines unless you also modify the range information.
1417 failing to follow this rule will result in the commit aborting.
1420 failing to follow this rule will result in the commit aborting.
1418
1421
1419 are you sure you want to review/edit and confirm the selected changes [yn]?
1422 are you sure you want to review/edit and confirm the selected changes [yn]?
1420 """)
1423 """)
1421 else:
1424 else:
1422 confirmtext = (
1425 confirmtext = (
1423 "are you sure you want to confirm the selected changes [yn]? ")
1426 "are you sure you want to confirm the selected changes [yn]? ")
1424
1427
1425 response = self.confirmationwindow(confirmtext)
1428 response = self.confirmationwindow(confirmtext)
1426 if response is None:
1429 if response is None:
1427 response = "n"
1430 response = "n"
1428 if response.lower().startswith("y"):
1431 if response.lower().startswith("y"):
1429 return True
1432 return True
1430 else:
1433 else:
1431 return False
1434 return False
1432
1435
1433 def recenterdisplayedarea(self):
1436 def recenterdisplayedarea(self):
1434 """
1437 """
1435 once we have scrolled with pgup/pgdn, the cursor can be pointing outside of the
1438 once we have scrolled with pgup/pgdn, the cursor can be pointing outside of the
1436 display zone. we print the patch with towin=False to compute the
1439 display zone. we print the patch with towin=False to compute the
1437 location of the selected item, even though it is outside of the displayed
1440 location of the selected item, even though it is outside of the displayed
1438 zone, and then update the scroll.
1441 zone, and then update the scroll.
1439 """
1442 """
1440 self.printitem(towin=False)
1443 self.printitem(towin=False)
1441 self.updatescroll()
1444 self.updatescroll()
1442
1445
1443 def toggleedit(self, item=None, test=False):
1446 def toggleedit(self, item=None, test=False):
1444 """
1447 """
1445 edit the currently selected chunk
1448 edit the currently selected chunk
1446 """
1449 """
1447 def updateui(self):
1450 def updateui(self):
1448 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1451 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1449 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1452 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1450 self.updatescroll()
1453 self.updatescroll()
1451 self.stdscr.refresh()
1454 self.stdscr.refresh()
1452 self.statuswin.refresh()
1455 self.statuswin.refresh()
1453 self.stdscr.keypad(1)
1456 self.stdscr.keypad(1)
1454
1457
1455 def editpatchwitheditor(self, chunk):
1458 def editpatchwitheditor(self, chunk):
1456 if chunk is None:
1459 if chunk is None:
1457 self.ui.write(_('cannot edit patch for whole file'))
1460 self.ui.write(_('cannot edit patch for whole file'))
1458 self.ui.write("\n")
1461 self.ui.write("\n")
1459 return None
1462 return None
1460 if chunk.header.binary():
1463 if chunk.header.binary():
1461 self.ui.write(_('cannot edit patch for binary file'))
1464 self.ui.write(_('cannot edit patch for binary file'))
1462 self.ui.write("\n")
1465 self.ui.write("\n")
1463 return None
1466 return None
1464 # patch comment based on the git one (based on comment at end of
1467 # patch comment based on the git one (based on comment at end of
1465 # https://mercurial-scm.org/wiki/recordextension)
1468 # https://mercurial-scm.org/wiki/recordextension)
1466 phelp = '---' + _("""
1469 phelp = '---' + _("""
1467 to remove '-' lines, make them ' ' lines (context).
1470 to remove '-' lines, make them ' ' lines (context).
1468 to remove '+' lines, delete them.
1471 to remove '+' lines, delete them.
1469 lines starting with # will be removed from the patch.
1472 lines starting with # will be removed from the patch.
1470
1473
1471 if the patch applies cleanly, the edited hunk will immediately be
1474 if the patch applies cleanly, the edited hunk will immediately be
1472 added to the record list. if it does not apply cleanly, a rejects
1475 added to the record list. if it does not apply cleanly, a rejects
1473 file will be generated: you can use that when you try again. if
1476 file will be generated: you can use that when you try again. if
1474 all lines of the hunk are removed, then the edit is aborted and
1477 all lines of the hunk are removed, then the edit is aborted and
1475 the hunk is left unchanged.
1478 the hunk is left unchanged.
1476 """)
1479 """)
1477 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1480 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1478 suffix=".diff", text=True)
1481 suffix=".diff", text=True)
1479 ncpatchfp = None
1482 ncpatchfp = None
1480 try:
1483 try:
1481 # write the initial patch
1484 # write the initial patch
1482 f = os.fdopen(patchfd, "w")
1485 f = os.fdopen(patchfd, "w")
1483 chunk.header.write(f)
1486 chunk.header.write(f)
1484 chunk.write(f)
1487 chunk.write(f)
1485 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1488 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1486 f.close()
1489 f.close()
1487 # start the editor and wait for it to complete
1490 # start the editor and wait for it to complete
1488 editor = self.ui.geteditor()
1491 editor = self.ui.geteditor()
1489 ret = self.ui.system("%s \"%s\"" % (editor, patchfn),
1492 ret = self.ui.system("%s \"%s\"" % (editor, patchfn),
1490 environ={'hguser': self.ui.username()})
1493 environ={'hguser': self.ui.username()})
1491 if ret != 0:
1494 if ret != 0:
1492 self.errorstr = "Editor exited with status %d" % ret
1495 self.errorstr = "Editor exited with status %d" % ret
1493 return None
1496 return None
1494 # remove comment lines
1497 # remove comment lines
1495 patchfp = open(patchfn)
1498 patchfp = open(patchfn)
1496 ncpatchfp = cStringIO.StringIO()
1499 ncpatchfp = cStringIO.StringIO()
1497 for line in patchfp:
1500 for line in patchfp:
1498 if not line.startswith('#'):
1501 if not line.startswith('#'):
1499 ncpatchfp.write(line)
1502 ncpatchfp.write(line)
1500 patchfp.close()
1503 patchfp.close()
1501 ncpatchfp.seek(0)
1504 ncpatchfp.seek(0)
1502 newpatches = patchmod.parsepatch(ncpatchfp)
1505 newpatches = patchmod.parsepatch(ncpatchfp)
1503 finally:
1506 finally:
1504 os.unlink(patchfn)
1507 os.unlink(patchfn)
1505 del ncpatchfp
1508 del ncpatchfp
1506 return newpatches
1509 return newpatches
1507 if item is None:
1510 if item is None:
1508 item = self.currentselecteditem
1511 item = self.currentselecteditem
1509 if isinstance(item, uiheader):
1512 if isinstance(item, uiheader):
1510 return
1513 return
1511 if isinstance(item, uihunkline):
1514 if isinstance(item, uihunkline):
1512 item = item.parentitem()
1515 item = item.parentitem()
1513 if not isinstance(item, uihunk):
1516 if not isinstance(item, uihunk):
1514 return
1517 return
1515
1518
1516 beforeadded, beforeremoved = item.added, item.removed
1519 beforeadded, beforeremoved = item.added, item.removed
1517 newpatches = editpatchwitheditor(self, item)
1520 newpatches = editpatchwitheditor(self, item)
1518 if newpatches is None:
1521 if newpatches is None:
1519 if not test:
1522 if not test:
1520 updateui(self)
1523 updateui(self)
1521 return
1524 return
1522 header = item.header
1525 header = item.header
1523 editedhunkindex = header.hunks.index(item)
1526 editedhunkindex = header.hunks.index(item)
1524 hunksbefore = header.hunks[:editedhunkindex]
1527 hunksbefore = header.hunks[:editedhunkindex]
1525 hunksafter = header.hunks[editedhunkindex + 1:]
1528 hunksafter = header.hunks[editedhunkindex + 1:]
1526 newpatchheader = newpatches[0]
1529 newpatchheader = newpatches[0]
1527 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1530 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1528 newadded = sum([h.added for h in newhunks])
1531 newadded = sum([h.added for h in newhunks])
1529 newremoved = sum([h.removed for h in newhunks])
1532 newremoved = sum([h.removed for h in newhunks])
1530 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1533 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1531
1534
1532 for h in hunksafter:
1535 for h in hunksafter:
1533 h.toline += offset
1536 h.toline += offset
1534 for h in newhunks:
1537 for h in newhunks:
1535 h.folded = False
1538 h.folded = False
1536 header.hunks = hunksbefore + newhunks + hunksafter
1539 header.hunks = hunksbefore + newhunks + hunksafter
1537 if self.emptypatch():
1540 if self.emptypatch():
1538 header.hunks = hunksbefore + [item] + hunksafter
1541 header.hunks = hunksbefore + [item] + hunksafter
1539 self.currentselecteditem = header
1542 self.currentselecteditem = header
1540
1543
1541 if not test:
1544 if not test:
1542 updateui(self)
1545 updateui(self)
1543
1546
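After an edit, toggleedit() shifts the target line of every following hunk by how much the edited hunk grew: the extra lines added minus the extra lines removed. A standalone illustration of that offset arithmetic (the function name and the numbers are invented for the example):

    def adjustfollowinghunks(tolines, beforeadded, beforeremoved,
                             newadded, newremoved):
        # growth of the edited hunk pushes every later hunk down by 'offset'
        offset = (newadded - beforeadded) - (newremoved - beforeremoved)
        return [t + offset for t in tolines]

    # the edited hunk now adds two more lines and removes one fewer line:
    assert adjustfollowinghunks([40, 55], beforeadded=3, beforeremoved=2,
                                newadded=5, newremoved=1) == [43, 58]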
1544 def emptypatch(self):
1547 def emptypatch(self):
1545 item = self.headerlist
1548 item = self.headerlist
1546 if not item:
1549 if not item:
1547 return True
1550 return True
1548 for header in item:
1551 for header in item:
1549 if header.hunks:
1552 if header.hunks:
1550 return False
1553 return False
1551 return True
1554 return True
1552
1555
1553 def handlekeypressed(self, keypressed, test=False):
1556 def handlekeypressed(self, keypressed, test=False):
1554 if keypressed in ["k", "KEY_UP"]:
1557 if keypressed in ["k", "KEY_UP"]:
1555 self.uparrowevent()
1558 self.uparrowevent()
1556 if keypressed in ["K", "KEY_PPAGE"]:
1559 if keypressed in ["K", "KEY_PPAGE"]:
1557 self.uparrowshiftevent()
1560 self.uparrowshiftevent()
1558 elif keypressed in ["j", "KEY_DOWN"]:
1561 elif keypressed in ["j", "KEY_DOWN"]:
1559 self.downarrowevent()
1562 self.downarrowevent()
1560 elif keypressed in ["J", "KEY_NPAGE"]:
1563 elif keypressed in ["J", "KEY_NPAGE"]:
1561 self.downarrowshiftevent()
1564 self.downarrowshiftevent()
1562 elif keypressed in ["l", "KEY_RIGHT"]:
1565 elif keypressed in ["l", "KEY_RIGHT"]:
1563 self.rightarrowevent()
1566 self.rightarrowevent()
1564 elif keypressed in ["h", "KEY_LEFT"]:
1567 elif keypressed in ["h", "KEY_LEFT"]:
1565 self.leftarrowevent()
1568 self.leftarrowevent()
1566 elif keypressed in ["H", "KEY_SLEFT"]:
1569 elif keypressed in ["H", "KEY_SLEFT"]:
1567 self.leftarrowshiftevent()
1570 self.leftarrowshiftevent()
1568 elif keypressed in ["q"]:
1571 elif keypressed in ["q"]:
1569 raise error.Abort(_('user quit'))
1572 raise error.Abort(_('user quit'))
1570 elif keypressed in ["c"]:
1573 elif keypressed in ["c"]:
1571 if self.confirmcommit():
1574 if self.confirmcommit():
1572 return True
1575 return True
1573 elif keypressed in ["r"]:
1576 elif keypressed in ["r"]:
1574 if self.confirmcommit(review=True):
1577 if self.confirmcommit(review=True):
1575 return True
1578 return True
1576 elif test and keypressed in ['X']:
1579 elif test and keypressed in ['X']:
1577 return True
1580 return True
1578 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1581 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1579 self.toggleapply()
1582 self.toggleapply()
1580 elif keypressed in ['A']:
1583 elif keypressed in ['A']:
1581 self.toggleall()
1584 self.toggleall()
1582 elif keypressed in ['e']:
1585 elif keypressed in ['e']:
1583 self.toggleedit(test=test)
1586 self.toggleedit(test=test)
1584 elif keypressed in ["f"]:
1587 elif keypressed in ["f"]:
1585 self.togglefolded()
1588 self.togglefolded()
1586 elif keypressed in ["F"]:
1589 elif keypressed in ["F"]:
1587 self.togglefolded(foldparent=True)
1590 self.togglefolded(foldparent=True)
1588 elif keypressed in ["?"]:
1591 elif keypressed in ["?"]:
1589 self.helpwindow()
1592 self.helpwindow()
1590 self.stdscr.clear()
1593 self.stdscr.clear()
1591 self.stdscr.refresh()
1594 self.stdscr.refresh()
1592
1595
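handlekeypressed() only returns a true value when the user confirms (c, r, or the test-only X key); every other key falls through, and the loop in main() below keeps reading keys. A tiny sketch of that contract, with canned input instead of a real curses screen (all names here are invented):

    def runloop(getkey, handle):
        # keep dispatching keys until the handler reports confirmation
        while True:
            if handle(getkey()):
                break

    keys = iter(['j', 'j', ' ', 'c'])
    runloop(lambda: next(keys), lambda key: key == 'c')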
1593 def main(self, stdscr):
1596 def main(self, stdscr):
1594 """
1597 """
1595 method to be wrapped by curses.wrapper() for selecting chunks.
1598 method to be wrapped by curses.wrapper() for selecting chunks.
1596
1599
1597 """
1600 """
1598 signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1601 signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1599 self.stdscr = stdscr
1602 self.stdscr = stdscr
1600 # error during initialization, cannot be printed in the curses
1603 # error during initialization, cannot be printed in the curses
1601 # interface, it should be printed by the calling code
1604 # interface, it should be printed by the calling code
1602 self.initerr = None
1605 self.initerr = None
1603 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1606 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1604
1607
1605 curses.start_color()
1608 curses.start_color()
1606 curses.use_default_colors()
1609 curses.use_default_colors()
1607
1610
1608 # available colors: black, blue, cyan, green, magenta, white, yellow
1611 # available colors: black, blue, cyan, green, magenta, white, yellow
1609 # init_pair(color_id, foreground_color, background_color)
1612 # init_pair(color_id, foreground_color, background_color)
1610 self.initcolorpair(None, None, name="normal")
1613 self.initcolorpair(None, None, name="normal")
1611 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
1614 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
1612 name="selected")
1615 name="selected")
1613 self.initcolorpair(curses.COLOR_RED, None, name="deletion")
1616 self.initcolorpair(curses.COLOR_RED, None, name="deletion")
1614 self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
1617 self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
1615 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
1618 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
1616 # newwin([height, width,] begin_y, begin_x)
1619 # newwin([height, width,] begin_y, begin_x)
1617 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1620 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1618 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1621 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1619
1622
1620 # figure out how much space to allocate for the chunk-pad which is
1623 # figure out how much space to allocate for the chunk-pad which is
1621 # used for displaying the patch
1624 # used for displaying the patch
1622
1625
1623 # stupid hack to prevent getnumlinesdisplayed from failing
1626 # stupid hack to prevent getnumlinesdisplayed from failing
1624 self.chunkpad = curses.newpad(1, self.xscreensize)
1627 self.chunkpad = curses.newpad(1, self.xscreensize)
1625
1628
1626 # add 1 to account for the last line's text reaching the end of the line
1629 # add 1 to account for the last line's text reaching the end of the line
1627 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1630 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1628
1631
1629 try:
1632 try:
1630 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1633 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1631 except curses.error:
1634 except curses.error:
1632 self.initerr = _('this diff is too large to be displayed')
1635 self.initerr = _('this diff is too large to be displayed')
1633 return
1636 return
1634 # initialize selecteditemendline (initial start-line is 0)
1637 # initialize selecteditemendline (initial start-line is 0)
1635 self.selecteditemendline = self.getnumlinesdisplayed(
1638 self.selecteditemendline = self.getnumlinesdisplayed(
1636 self.currentselecteditem, recursechildren=False)
1639 self.currentselecteditem, recursechildren=False)
1637
1640
1638 while True:
1641 while True:
1639 self.updatescreen()
1642 self.updatescreen()
1640 try:
1643 try:
1641 keypressed = self.statuswin.getkey()
1644 keypressed = self.statuswin.getkey()
1642 if self.errorstr is not None:
1645 if self.errorstr is not None:
1643 self.errorstr = None
1646 self.errorstr = None
1644 continue
1647 continue
1645 except curses.error:
1648 except curses.error:
1646 keypressed = "foobar"
1649 keypressed = "foobar"
1647 if self.handlekeypressed(keypressed):
1650 if self.handlekeypressed(keypressed):
1648 break
1651 break
@@ -1,2557 +1,2557
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12
12
13 from i18n import _
13 from i18n import _
14 from node import hex, short
14 from node import hex, short
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
15 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
16 import pathutil
16 import pathutil
17
17
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
18 gitre = re.compile('diff --git a/(.*) b/(.*)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
19 tabsplitter = re.compile(r'(\t+|[^\t]+)')
20
20
21 class PatchError(Exception):
21 class PatchError(Exception):
22 pass
22 pass
23
23
24
24
25 # public functions
25 # public functions
26
26
27 def split(stream):
27 def split(stream):
28 '''return an iterator of individual patches from a stream'''
28 '''return an iterator of individual patches from a stream'''
29 def isheader(line, inheader):
29 def isheader(line, inheader):
30 if inheader and line[0] in (' ', '\t'):
30 if inheader and line[0] in (' ', '\t'):
31 # continuation
31 # continuation
32 return True
32 return True
33 if line[0] in (' ', '-', '+'):
33 if line[0] in (' ', '-', '+'):
34 # diff line - don't check for header pattern in there
34 # diff line - don't check for header pattern in there
35 return False
35 return False
36 l = line.split(': ', 1)
36 l = line.split(': ', 1)
37 return len(l) == 2 and ' ' not in l[0]
37 return len(l) == 2 and ' ' not in l[0]
38
38
39 def chunk(lines):
39 def chunk(lines):
40 return cStringIO.StringIO(''.join(lines))
40 return cStringIO.StringIO(''.join(lines))
41
41
42 def hgsplit(stream, cur):
42 def hgsplit(stream, cur):
43 inheader = True
43 inheader = True
44
44
45 for line in stream:
45 for line in stream:
46 if not line.strip():
46 if not line.strip():
47 inheader = False
47 inheader = False
48 if not inheader and line.startswith('# HG changeset patch'):
48 if not inheader and line.startswith('# HG changeset patch'):
49 yield chunk(cur)
49 yield chunk(cur)
50 cur = []
50 cur = []
51 inheader = True
51 inheader = True
52
52
53 cur.append(line)
53 cur.append(line)
54
54
55 if cur:
55 if cur:
56 yield chunk(cur)
56 yield chunk(cur)
57
57
58 def mboxsplit(stream, cur):
58 def mboxsplit(stream, cur):
59 for line in stream:
59 for line in stream:
60 if line.startswith('From '):
60 if line.startswith('From '):
61 for c in split(chunk(cur[1:])):
61 for c in split(chunk(cur[1:])):
62 yield c
62 yield c
63 cur = []
63 cur = []
64
64
65 cur.append(line)
65 cur.append(line)
66
66
67 if cur:
67 if cur:
68 for c in split(chunk(cur[1:])):
68 for c in split(chunk(cur[1:])):
69 yield c
69 yield c
70
70
71 def mimesplit(stream, cur):
71 def mimesplit(stream, cur):
72 def msgfp(m):
72 def msgfp(m):
73 fp = cStringIO.StringIO()
73 fp = cStringIO.StringIO()
74 g = email.Generator.Generator(fp, mangle_from_=False)
74 g = email.Generator.Generator(fp, mangle_from_=False)
75 g.flatten(m)
75 g.flatten(m)
76 fp.seek(0)
76 fp.seek(0)
77 return fp
77 return fp
78
78
79 for line in stream:
79 for line in stream:
80 cur.append(line)
80 cur.append(line)
81 c = chunk(cur)
81 c = chunk(cur)
82
82
83 m = email.Parser.Parser().parse(c)
83 m = email.Parser.Parser().parse(c)
84 if not m.is_multipart():
84 if not m.is_multipart():
85 yield msgfp(m)
85 yield msgfp(m)
86 else:
86 else:
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
87 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
88 for part in m.walk():
88 for part in m.walk():
89 ct = part.get_content_type()
89 ct = part.get_content_type()
90 if ct not in ok_types:
90 if ct not in ok_types:
91 continue
91 continue
92 yield msgfp(part)
92 yield msgfp(part)
93
93
94 def headersplit(stream, cur):
94 def headersplit(stream, cur):
95 inheader = False
95 inheader = False
96
96
97 for line in stream:
97 for line in stream:
98 if not inheader and isheader(line, inheader):
98 if not inheader and isheader(line, inheader):
99 yield chunk(cur)
99 yield chunk(cur)
100 cur = []
100 cur = []
101 inheader = True
101 inheader = True
102 if inheader and not isheader(line, inheader):
102 if inheader and not isheader(line, inheader):
103 inheader = False
103 inheader = False
104
104
105 cur.append(line)
105 cur.append(line)
106
106
107 if cur:
107 if cur:
108 yield chunk(cur)
108 yield chunk(cur)
109
109
110 def remainder(cur):
110 def remainder(cur):
111 yield chunk(cur)
111 yield chunk(cur)
112
112
113 class fiter(object):
113 class fiter(object):
114 def __init__(self, fp):
114 def __init__(self, fp):
115 self.fp = fp
115 self.fp = fp
116
116
117 def __iter__(self):
117 def __iter__(self):
118 return self
118 return self
119
119
120 def next(self):
120 def next(self):
121 l = self.fp.readline()
121 l = self.fp.readline()
122 if not l:
122 if not l:
123 raise StopIteration
123 raise StopIteration
124 return l
124 return l
125
125
126 inheader = False
126 inheader = False
127 cur = []
127 cur = []
128
128
129 mimeheaders = ['content-type']
129 mimeheaders = ['content-type']
130
130
131 if not util.safehasattr(stream, 'next'):
131 if not util.safehasattr(stream, 'next'):
132 # http responses, for example, have readline but not next
132 # http responses, for example, have readline but not next
133 stream = fiter(stream)
133 stream = fiter(stream)
134
134
135 for line in stream:
135 for line in stream:
136 cur.append(line)
136 cur.append(line)
137 if line.startswith('# HG changeset patch'):
137 if line.startswith('# HG changeset patch'):
138 return hgsplit(stream, cur)
138 return hgsplit(stream, cur)
139 elif line.startswith('From '):
139 elif line.startswith('From '):
140 return mboxsplit(stream, cur)
140 return mboxsplit(stream, cur)
141 elif isheader(line, inheader):
141 elif isheader(line, inheader):
142 inheader = True
142 inheader = True
143 if line.split(':', 1)[0].lower() in mimeheaders:
143 if line.split(':', 1)[0].lower() in mimeheaders:
144 # let email parser handle this
144 # let email parser handle this
145 return mimesplit(stream, cur)
145 return mimesplit(stream, cur)
146 elif line.startswith('--- ') and inheader:
146 elif line.startswith('--- ') and inheader:
147 # No evil headers seen by diff start, split by hand
147 # No evil headers seen by diff start, split by hand
148 return headersplit(stream, cur)
148 return headersplit(stream, cur)
149 # Not enough info, keep reading
149 # Not enough info, keep reading
150
150
151 # if we are here, we have a very plain patch
151 # if we are here, we have a very plain patch
152 return remainder(cur)
152 return remainder(cur)
153
153
154 ## Some facility for extensible patch parsing:
154 ## Some facility for extensible patch parsing:
155 # list of pairs ("header to match", "data key")
155 # list of pairs ("header to match", "data key")
156 patchheadermap = [('Date', 'date'),
156 patchheadermap = [('Date', 'date'),
157 ('Branch', 'branch'),
157 ('Branch', 'branch'),
158 ('Node ID', 'nodeid'),
158 ('Node ID', 'nodeid'),
159 ]
159 ]
160
160
161 def extract(ui, fileobj):
161 def extract(ui, fileobj):
162 '''extract patch from data read from fileobj.
162 '''extract patch from data read from fileobj.
163
163
164 patch can be a normal patch or contained in an email message.
164 patch can be a normal patch or contained in an email message.
165
165
166 return a dictionary. Standard keys are:
166 return a dictionary. Standard keys are:
167 - filename,
167 - filename,
168 - message,
168 - message,
169 - user,
169 - user,
170 - date,
170 - date,
171 - branch,
171 - branch,
172 - node,
172 - node,
173 - p1,
173 - p1,
174 - p2.
174 - p2.
175 Any item can be missing from the dictionary. If filename is missing,
175 Any item can be missing from the dictionary. If filename is missing,
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
176 fileobj did not contain a patch. Caller must unlink filename when done.'''
177
177
178 # attempt to detect the start of a patch
178 # attempt to detect the start of a patch
179 # (this heuristic is borrowed from quilt)
179 # (this heuristic is borrowed from quilt)
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
180 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
181 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
182 r'---[ \t].*?^\+\+\+[ \t]|'
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
183 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
184
184
185 data = {}
185 data = {}
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
186 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
187 tmpfp = os.fdopen(fd, 'w')
187 tmpfp = os.fdopen(fd, 'w')
188 try:
188 try:
189 msg = email.Parser.Parser().parse(fileobj)
189 msg = email.Parser.Parser().parse(fileobj)
190
190
191 subject = msg['Subject']
191 subject = msg['Subject']
192 data['user'] = msg['From']
192 data['user'] = msg['From']
193 if not subject and not data['user']:
193 if not subject and not data['user']:
194 # Not an email, restore parsed headers if any
194 # Not an email, restore parsed headers if any
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
195 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
196
196
197 # should try to parse msg['Date']
197 # should try to parse msg['Date']
198 parents = []
198 parents = []
199
199
200 if subject:
200 if subject:
201 if subject.startswith('[PATCH'):
201 if subject.startswith('[PATCH'):
202 pend = subject.find(']')
202 pend = subject.find(']')
203 if pend >= 0:
203 if pend >= 0:
204 subject = subject[pend + 1:].lstrip()
204 subject = subject[pend + 1:].lstrip()
205 subject = re.sub(r'\n[ \t]+', ' ', subject)
205 subject = re.sub(r'\n[ \t]+', ' ', subject)
206 ui.debug('Subject: %s\n' % subject)
206 ui.debug('Subject: %s\n' % subject)
207 if data['user']:
207 if data['user']:
208 ui.debug('From: %s\n' % data['user'])
208 ui.debug('From: %s\n' % data['user'])
209 diffs_seen = 0
209 diffs_seen = 0
210 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
210 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
211 message = ''
211 message = ''
212 for part in msg.walk():
212 for part in msg.walk():
213 content_type = part.get_content_type()
213 content_type = part.get_content_type()
214 ui.debug('Content-Type: %s\n' % content_type)
214 ui.debug('Content-Type: %s\n' % content_type)
215 if content_type not in ok_types:
215 if content_type not in ok_types:
216 continue
216 continue
217 payload = part.get_payload(decode=True)
217 payload = part.get_payload(decode=True)
218 m = diffre.search(payload)
218 m = diffre.search(payload)
219 if m:
219 if m:
220 hgpatch = False
220 hgpatch = False
221 hgpatchheader = False
221 hgpatchheader = False
222 ignoretext = False
222 ignoretext = False
223
223
224 ui.debug('found patch at byte %d\n' % m.start(0))
224 ui.debug('found patch at byte %d\n' % m.start(0))
225 diffs_seen += 1
225 diffs_seen += 1
226 cfp = cStringIO.StringIO()
226 cfp = cStringIO.StringIO()
227 for line in payload[:m.start(0)].splitlines():
227 for line in payload[:m.start(0)].splitlines():
228 if line.startswith('# HG changeset patch') and not hgpatch:
228 if line.startswith('# HG changeset patch') and not hgpatch:
229 ui.debug('patch generated by hg export\n')
229 ui.debug('patch generated by hg export\n')
230 hgpatch = True
230 hgpatch = True
231 hgpatchheader = True
231 hgpatchheader = True
232 # drop earlier commit message content
232 # drop earlier commit message content
233 cfp.seek(0)
233 cfp.seek(0)
234 cfp.truncate()
234 cfp.truncate()
235 subject = None
235 subject = None
236 elif hgpatchheader:
236 elif hgpatchheader:
237 if line.startswith('# User '):
237 if line.startswith('# User '):
238 data['user'] = line[7:]
238 data['user'] = line[7:]
239 ui.debug('From: %s\n' % data['user'])
239 ui.debug('From: %s\n' % data['user'])
240 elif line.startswith("# Parent "):
240 elif line.startswith("# Parent "):
241 parents.append(line[9:].lstrip())
241 parents.append(line[9:].lstrip())
242 elif line.startswith("# "):
242 elif line.startswith("# "):
243 for header, key in patchheadermap:
243 for header, key in patchheadermap:
244 prefix = '# %s ' % header
244 prefix = '# %s ' % header
245 if line.startswith(prefix):
245 if line.startswith(prefix):
246 data[key] = line[len(prefix):]
246 data[key] = line[len(prefix):]
247 else:
247 else:
248 hgpatchheader = False
248 hgpatchheader = False
249 elif line == '---':
249 elif line == '---':
250 ignoretext = True
250 ignoretext = True
251 if not hgpatchheader and not ignoretext:
251 if not hgpatchheader and not ignoretext:
252 cfp.write(line)
252 cfp.write(line)
253 cfp.write('\n')
253 cfp.write('\n')
254 message = cfp.getvalue()
254 message = cfp.getvalue()
255 if tmpfp:
255 if tmpfp:
256 tmpfp.write(payload)
256 tmpfp.write(payload)
257 if not payload.endswith('\n'):
257 if not payload.endswith('\n'):
258 tmpfp.write('\n')
258 tmpfp.write('\n')
259 elif not diffs_seen and message and content_type == 'text/plain':
259 elif not diffs_seen and message and content_type == 'text/plain':
260 message += '\n' + payload
260 message += '\n' + payload
261 except: # re-raises
261 except: # re-raises
262 tmpfp.close()
262 tmpfp.close()
263 os.unlink(tmpname)
263 os.unlink(tmpname)
264 raise
264 raise
265
265
266 if subject and not message.startswith(subject):
266 if subject and not message.startswith(subject):
267 message = '%s\n%s' % (subject, message)
267 message = '%s\n%s' % (subject, message)
268 data['message'] = message
268 data['message'] = message
269 tmpfp.close()
269 tmpfp.close()
270 if parents:
270 if parents:
271 data['p1'] = parents.pop(0)
271 data['p1'] = parents.pop(0)
272 if parents:
272 if parents:
273 data['p2'] = parents.pop(0)
273 data['p2'] = parents.pop(0)
274
274
275 if diffs_seen:
275 if diffs_seen:
276 data['filename'] = tmpname
276 data['filename'] = tmpname
277 else:
277 else:
278 os.unlink(tmpname)
278 os.unlink(tmpname)
279 return data
279 return data
280
280
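# Illustrative usage sketch (assumption, not part of the original module):
# consume the dictionary documented in extract() above and honour the
# "caller must unlink filename when done" contract. The helper name and the
# way the values are reported are hypothetical.
def _extract_example(ui, fileobj):
    data = extract(ui, fileobj)
    try:
        if 'filename' not in data:
            return None  # fileobj did not contain a patch
        ui.debug('patch from %s: %s\n' % (data.get('user', 'unknown'),
                                          data.get('message', '')))
        return data.get('p1'), data.get('p2')
    finally:
        if 'filename' in data:
            os.unlink(data['filename'])
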
281 class patchmeta(object):
281 class patchmeta(object):
282 """Patched file metadata
282 """Patched file metadata
283
283
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
284 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
285 or COPY. 'path' is patched file path. 'oldpath' is set to the
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
286 origin file when 'op' is either COPY or RENAME, None otherwise. If
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
287 file mode is changed, 'mode' is a tuple (islink, isexec) where
288 'islink' is True if the file is a symlink and 'isexec' is True if
288 'islink' is True if the file is a symlink and 'isexec' is True if
289 the file is executable. Otherwise, 'mode' is None.
289 the file is executable. Otherwise, 'mode' is None.
290 """
290 """
291 def __init__(self, path):
291 def __init__(self, path):
292 self.path = path
292 self.path = path
293 self.oldpath = None
293 self.oldpath = None
294 self.mode = None
294 self.mode = None
295 self.op = 'MODIFY'
295 self.op = 'MODIFY'
296 self.binary = False
296 self.binary = False
297
297
298 def setmode(self, mode):
298 def setmode(self, mode):
299 islink = mode & 0o20000
299 islink = mode & 0o20000
300 isexec = mode & 0o100
300 isexec = mode & 0o100
301 self.mode = (islink, isexec)
301 self.mode = (islink, isexec)
302
302
303 def copy(self):
303 def copy(self):
304 other = patchmeta(self.path)
304 other = patchmeta(self.path)
305 other.oldpath = self.oldpath
305 other.oldpath = self.oldpath
306 other.mode = self.mode
306 other.mode = self.mode
307 other.op = self.op
307 other.op = self.op
308 other.binary = self.binary
308 other.binary = self.binary
309 return other
309 return other
310
310
311 def _ispatchinga(self, afile):
311 def _ispatchinga(self, afile):
312 if afile == '/dev/null':
312 if afile == '/dev/null':
313 return self.op == 'ADD'
313 return self.op == 'ADD'
314 return afile == 'a/' + (self.oldpath or self.path)
314 return afile == 'a/' + (self.oldpath or self.path)
315
315
316 def _ispatchingb(self, bfile):
316 def _ispatchingb(self, bfile):
317 if bfile == '/dev/null':
317 if bfile == '/dev/null':
318 return self.op == 'DELETE'
318 return self.op == 'DELETE'
319 return bfile == 'b/' + self.path
319 return bfile == 'b/' + self.path
320
320
321 def ispatching(self, afile, bfile):
321 def ispatching(self, afile, bfile):
322 return self._ispatchinga(afile) and self._ispatchingb(bfile)
322 return self._ispatchinga(afile) and self._ispatchingb(bfile)
323
323
324 def __repr__(self):
324 def __repr__(self):
325 return "<patchmeta %s %r>" % (self.op, self.path)
325 return "<patchmeta %s %r>" % (self.op, self.path)
326
326
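# Illustrative sketch (not part of the original module): the metadata object
# readgitpatch() below would build for a renamed, executable file. The helper
# name and sample file names are hypothetical.
def _patchmeta_example():
    gp = patchmeta('new-name.txt')
    gp.op = 'RENAME'
    gp.oldpath = 'old-name.txt'
    gp.setmode(0o100755)
    # gp.mode is (islink, isexec): islink is falsy, isexec is truthy here
    # gp.ispatching('a/old-name.txt', 'b/new-name.txt') returns True
    return gp
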
327 def readgitpatch(lr):
327 def readgitpatch(lr):
328 """extract git-style metadata about patches from <patchname>"""
328 """extract git-style metadata about patches from <patchname>"""
329
329
330 # Filter patch for git information
330 # Filter patch for git information
331 gp = None
331 gp = None
332 gitpatches = []
332 gitpatches = []
333 for line in lr:
333 for line in lr:
334 line = line.rstrip(' \r\n')
334 line = line.rstrip(' \r\n')
335 if line.startswith('diff --git a/'):
335 if line.startswith('diff --git a/'):
336 m = gitre.match(line)
336 m = gitre.match(line)
337 if m:
337 if m:
338 if gp:
338 if gp:
339 gitpatches.append(gp)
339 gitpatches.append(gp)
340 dst = m.group(2)
340 dst = m.group(2)
341 gp = patchmeta(dst)
341 gp = patchmeta(dst)
342 elif gp:
342 elif gp:
343 if line.startswith('--- '):
343 if line.startswith('--- '):
344 gitpatches.append(gp)
344 gitpatches.append(gp)
345 gp = None
345 gp = None
346 continue
346 continue
347 if line.startswith('rename from '):
347 if line.startswith('rename from '):
348 gp.op = 'RENAME'
348 gp.op = 'RENAME'
349 gp.oldpath = line[12:]
349 gp.oldpath = line[12:]
350 elif line.startswith('rename to '):
350 elif line.startswith('rename to '):
351 gp.path = line[10:]
351 gp.path = line[10:]
352 elif line.startswith('copy from '):
352 elif line.startswith('copy from '):
353 gp.op = 'COPY'
353 gp.op = 'COPY'
354 gp.oldpath = line[10:]
354 gp.oldpath = line[10:]
355 elif line.startswith('copy to '):
355 elif line.startswith('copy to '):
356 gp.path = line[8:]
356 gp.path = line[8:]
357 elif line.startswith('deleted file'):
357 elif line.startswith('deleted file'):
358 gp.op = 'DELETE'
358 gp.op = 'DELETE'
359 elif line.startswith('new file mode '):
359 elif line.startswith('new file mode '):
360 gp.op = 'ADD'
360 gp.op = 'ADD'
361 gp.setmode(int(line[-6:], 8))
361 gp.setmode(int(line[-6:], 8))
362 elif line.startswith('new mode '):
362 elif line.startswith('new mode '):
363 gp.setmode(int(line[-6:], 8))
363 gp.setmode(int(line[-6:], 8))
364 elif line.startswith('GIT binary patch'):
364 elif line.startswith('GIT binary patch'):
365 gp.binary = True
365 gp.binary = True
366 if gp:
366 if gp:
367 gitpatches.append(gp)
367 gitpatches.append(gp)
368
368
369 return gitpatches
369 return gitpatches
370
370
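# Illustrative sketch (not part of the original module): feed a small
# git-style diff header through readgitpatch() above and inspect the
# resulting patchmeta. The helper name and the sample file names are
# hypothetical.
def _readgitpatch_example():
    lines = iter(['diff --git a/a.txt b/b.txt\n',
                  'rename from a.txt\n',
                  'rename to b.txt\n',
                  '--- a/a.txt\n',
                  '+++ b/b.txt\n'])
    gps = readgitpatch(lines)
    # gps[0].op == 'RENAME', gps[0].oldpath == 'a.txt', gps[0].path == 'b.txt'
    return gps
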
371 class linereader(object):
371 class linereader(object):
372 # simple class to allow pushing lines back into the input stream
372 # simple class to allow pushing lines back into the input stream
373 def __init__(self, fp):
373 def __init__(self, fp):
374 self.fp = fp
374 self.fp = fp
375 self.buf = []
375 self.buf = []
376
376
377 def push(self, line):
377 def push(self, line):
378 if line is not None:
378 if line is not None:
379 self.buf.append(line)
379 self.buf.append(line)
380
380
381 def readline(self):
381 def readline(self):
382 if self.buf:
382 if self.buf:
383 l = self.buf[0]
383 l = self.buf[0]
384 del self.buf[0]
384 del self.buf[0]
385 return l
385 return l
386 return self.fp.readline()
386 return self.fp.readline()
387
387
388 def __iter__(self):
388 def __iter__(self):
389 while True:
389 while True:
390 l = self.readline()
390 l = self.readline()
391 if not l:
391 if not l:
392 break
392 break
393 yield l
393 yield l
394
394
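# Illustrative sketch (not part of the original module): pushed-back lines
# are yielded before anything further is read from the underlying file. The
# helper name is hypothetical.
def _linereader_example():
    lr = linereader(cStringIO.StringIO('one\ntwo\n'))
    first = lr.readline()   # 'one\n'
    lr.push(first)          # put it back for the next reader
    # iterating now yields 'one\n' then 'two\n'
    return list(lr)
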
395 class abstractbackend(object):
395 class abstractbackend(object):
396 def __init__(self, ui):
396 def __init__(self, ui):
397 self.ui = ui
397 self.ui = ui
398
398
399 def getfile(self, fname):
399 def getfile(self, fname):
400 """Return target file data and flags as a (data, (islink,
400 """Return target file data and flags as a (data, (islink,
401 isexec)) tuple. Data is None if file is missing/deleted.
401 isexec)) tuple. Data is None if file is missing/deleted.
402 """
402 """
403 raise NotImplementedError
403 raise NotImplementedError
404
404
405 def setfile(self, fname, data, mode, copysource):
405 def setfile(self, fname, data, mode, copysource):
406 """Write data to target file fname and set its mode. mode is a
406 """Write data to target file fname and set its mode. mode is a
407 (islink, isexec) tuple. If data is None, the file content should
407 (islink, isexec) tuple. If data is None, the file content should
408 be left unchanged. If the file is modified after being copied,
408 be left unchanged. If the file is modified after being copied,
409 copysource is set to the original file name.
409 copysource is set to the original file name.
410 """
410 """
411 raise NotImplementedError
411 raise NotImplementedError
412
412
413 def unlink(self, fname):
413 def unlink(self, fname):
414 """Unlink target file."""
414 """Unlink target file."""
415 raise NotImplementedError
415 raise NotImplementedError
416
416
417 def writerej(self, fname, failed, total, lines):
417 def writerej(self, fname, failed, total, lines):
418 """Write rejected lines for fname. total is the number of hunks
418 """Write rejected lines for fname. total is the number of hunks
419 which failed to apply and total the total number of hunks for this
419 which failed to apply and total the total number of hunks for this
420 files.
420 files.
421 """
421 """
422 pass
422 pass
423
423
424 def exists(self, fname):
424 def exists(self, fname):
425 raise NotImplementedError
425 raise NotImplementedError
426
426
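# Illustrative sketch (not part of the original module): the smallest backend
# honouring the (data, (islink, isexec)) contract documented in
# abstractbackend above, keeping everything in a dictionary. The class name
# is hypothetical.
class _memorybackend(abstractbackend):
    def __init__(self, ui):
        super(_memorybackend, self).__init__(ui)
        self._files = {}  # fname -> (data, (islink, isexec))

    def getfile(self, fname):
        return self._files.get(fname, (None, None))

    def setfile(self, fname, data, mode, copysource):
        if data is None:
            # flags-only change: keep the existing content
            data = self._files.get(fname, (None, None))[0]
        self._files[fname] = (data, mode)

    def unlink(self, fname):
        self._files.pop(fname, None)

    def exists(self, fname):
        return fname in self._files
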
427 class fsbackend(abstractbackend):
427 class fsbackend(abstractbackend):
428 def __init__(self, ui, basedir):
428 def __init__(self, ui, basedir):
429 super(fsbackend, self).__init__(ui)
429 super(fsbackend, self).__init__(ui)
430 self.opener = scmutil.opener(basedir)
430 self.opener = scmutil.opener(basedir)
431
431
432 def _join(self, f):
432 def _join(self, f):
433 return os.path.join(self.opener.base, f)
433 return os.path.join(self.opener.base, f)
434
434
435 def getfile(self, fname):
435 def getfile(self, fname):
436 if self.opener.islink(fname):
436 if self.opener.islink(fname):
437 return (self.opener.readlink(fname), (True, False))
437 return (self.opener.readlink(fname), (True, False))
438
438
439 isexec = False
439 isexec = False
440 try:
440 try:
441 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
441 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
442 except OSError as e:
442 except OSError as e:
443 if e.errno != errno.ENOENT:
443 if e.errno != errno.ENOENT:
444 raise
444 raise
445 try:
445 try:
446 return (self.opener.read(fname), (False, isexec))
446 return (self.opener.read(fname), (False, isexec))
447 except IOError as e:
447 except IOError as e:
448 if e.errno != errno.ENOENT:
448 if e.errno != errno.ENOENT:
449 raise
449 raise
450 return None, None
450 return None, None
451
451
452 def setfile(self, fname, data, mode, copysource):
452 def setfile(self, fname, data, mode, copysource):
453 islink, isexec = mode
453 islink, isexec = mode
454 if data is None:
454 if data is None:
455 self.opener.setflags(fname, islink, isexec)
455 self.opener.setflags(fname, islink, isexec)
456 return
456 return
457 if islink:
457 if islink:
458 self.opener.symlink(data, fname)
458 self.opener.symlink(data, fname)
459 else:
459 else:
460 self.opener.write(fname, data)
460 self.opener.write(fname, data)
461 if isexec:
461 if isexec:
462 self.opener.setflags(fname, False, True)
462 self.opener.setflags(fname, False, True)
463
463
464 def unlink(self, fname):
464 def unlink(self, fname):
465 self.opener.unlinkpath(fname, ignoremissing=True)
465 self.opener.unlinkpath(fname, ignoremissing=True)
466
466
467 def writerej(self, fname, failed, total, lines):
467 def writerej(self, fname, failed, total, lines):
468 fname = fname + ".rej"
468 fname = fname + ".rej"
469 self.ui.warn(
469 self.ui.warn(
470 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
470 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
471 (failed, total, fname))
471 (failed, total, fname))
472 fp = self.opener(fname, 'w')
472 fp = self.opener(fname, 'w')
473 fp.writelines(lines)
473 fp.writelines(lines)
474 fp.close()
474 fp.close()
475
475
476 def exists(self, fname):
476 def exists(self, fname):
477 return self.opener.lexists(fname)
477 return self.opener.lexists(fname)
478
478
479 class workingbackend(fsbackend):
479 class workingbackend(fsbackend):
480 def __init__(self, ui, repo, similarity):
480 def __init__(self, ui, repo, similarity):
481 super(workingbackend, self).__init__(ui, repo.root)
481 super(workingbackend, self).__init__(ui, repo.root)
482 self.repo = repo
482 self.repo = repo
483 self.similarity = similarity
483 self.similarity = similarity
484 self.removed = set()
484 self.removed = set()
485 self.changed = set()
485 self.changed = set()
486 self.copied = []
486 self.copied = []
487
487
488 def _checkknown(self, fname):
488 def _checkknown(self, fname):
489 if self.repo.dirstate[fname] == '?' and self.exists(fname):
489 if self.repo.dirstate[fname] == '?' and self.exists(fname):
490 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
490 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
491
491
492 def setfile(self, fname, data, mode, copysource):
492 def setfile(self, fname, data, mode, copysource):
493 self._checkknown(fname)
493 self._checkknown(fname)
494 super(workingbackend, self).setfile(fname, data, mode, copysource)
494 super(workingbackend, self).setfile(fname, data, mode, copysource)
495 if copysource is not None:
495 if copysource is not None:
496 self.copied.append((copysource, fname))
496 self.copied.append((copysource, fname))
497 self.changed.add(fname)
497 self.changed.add(fname)
498
498
499 def unlink(self, fname):
499 def unlink(self, fname):
500 self._checkknown(fname)
500 self._checkknown(fname)
501 super(workingbackend, self).unlink(fname)
501 super(workingbackend, self).unlink(fname)
502 self.removed.add(fname)
502 self.removed.add(fname)
503 self.changed.add(fname)
503 self.changed.add(fname)
504
504
505 def close(self):
505 def close(self):
506 wctx = self.repo[None]
506 wctx = self.repo[None]
507 changed = set(self.changed)
507 changed = set(self.changed)
508 for src, dst in self.copied:
508 for src, dst in self.copied:
509 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
509 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
510 if self.removed:
510 if self.removed:
511 wctx.forget(sorted(self.removed))
511 wctx.forget(sorted(self.removed))
512 for f in self.removed:
512 for f in self.removed:
513 if f not in self.repo.dirstate:
513 if f not in self.repo.dirstate:
514 # File was deleted and no longer belongs to the
514 # File was deleted and no longer belongs to the
515 # dirstate, it was probably marked added then
515 # dirstate, it was probably marked added then
516 # deleted, and should not be considered by
516 # deleted, and should not be considered by
517 # marktouched().
517 # marktouched().
518 changed.discard(f)
518 changed.discard(f)
519 if changed:
519 if changed:
520 scmutil.marktouched(self.repo, changed, self.similarity)
520 scmutil.marktouched(self.repo, changed, self.similarity)
521 return sorted(self.changed)
521 return sorted(self.changed)
522
522
523 class filestore(object):
523 class filestore(object):
524 def __init__(self, maxsize=None):
524 def __init__(self, maxsize=None):
525 self.opener = None
525 self.opener = None
526 self.files = {}
526 self.files = {}
527 self.created = 0
527 self.created = 0
528 self.maxsize = maxsize
528 self.maxsize = maxsize
529 if self.maxsize is None:
529 if self.maxsize is None:
530 self.maxsize = 4*(2**20)
530 self.maxsize = 4*(2**20)
531 self.size = 0
531 self.size = 0
532 self.data = {}
532 self.data = {}
533
533
534 def setfile(self, fname, data, mode, copied=None):
534 def setfile(self, fname, data, mode, copied=None):
535 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
535 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
536 self.data[fname] = (data, mode, copied)
536 self.data[fname] = (data, mode, copied)
537 self.size += len(data)
537 self.size += len(data)
538 else:
538 else:
539 if self.opener is None:
539 if self.opener is None:
540 root = tempfile.mkdtemp(prefix='hg-patch-')
540 root = tempfile.mkdtemp(prefix='hg-patch-')
541 self.opener = scmutil.opener(root)
541 self.opener = scmutil.opener(root)
542 # Avoid filename issues with these simple names
542 # Avoid filename issues with these simple names
543 fn = str(self.created)
543 fn = str(self.created)
544 self.opener.write(fn, data)
544 self.opener.write(fn, data)
545 self.created += 1
545 self.created += 1
546 self.files[fname] = (fn, mode, copied)
546 self.files[fname] = (fn, mode, copied)
547
547
548 def getfile(self, fname):
548 def getfile(self, fname):
549 if fname in self.data:
549 if fname in self.data:
550 return self.data[fname]
550 return self.data[fname]
551 if not self.opener or fname not in self.files:
551 if not self.opener or fname not in self.files:
552 return None, None, None
552 return None, None, None
553 fn, mode, copied = self.files[fname]
553 fn, mode, copied = self.files[fname]
554 return self.opener.read(fn), mode, copied
554 return self.opener.read(fn), mode, copied
555
555
556 def close(self):
556 def close(self):
557 if self.opener:
557 if self.opener:
558 shutil.rmtree(self.opener.base)
558 shutil.rmtree(self.opener.base)
559
559
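# Illustrative sketch (not part of the original module): small blobs stay in
# memory, anything that would push filestore past maxsize spills to a
# temporary directory and is read back transparently. The helper name is
# hypothetical.
def _filestore_example():
    store = filestore(maxsize=16)
    try:
        store.setfile('small', 'tiny', (False, False))    # kept in memory
        store.setfile('big', 'x' * 64, (False, False))    # spilled to disk
        data, mode, copied = store.getfile('big')
        # data == 'x' * 64, mode == (False, False), copied is None
        return data, mode, copied
    finally:
        store.close()
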
560 class repobackend(abstractbackend):
560 class repobackend(abstractbackend):
561 def __init__(self, ui, repo, ctx, store):
561 def __init__(self, ui, repo, ctx, store):
562 super(repobackend, self).__init__(ui)
562 super(repobackend, self).__init__(ui)
563 self.repo = repo
563 self.repo = repo
564 self.ctx = ctx
564 self.ctx = ctx
565 self.store = store
565 self.store = store
566 self.changed = set()
566 self.changed = set()
567 self.removed = set()
567 self.removed = set()
568 self.copied = {}
568 self.copied = {}
569
569
570 def _checkknown(self, fname):
570 def _checkknown(self, fname):
571 if fname not in self.ctx:
571 if fname not in self.ctx:
572 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
572 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
573
573
574 def getfile(self, fname):
574 def getfile(self, fname):
575 try:
575 try:
576 fctx = self.ctx[fname]
576 fctx = self.ctx[fname]
577 except error.LookupError:
577 except error.LookupError:
578 return None, None
578 return None, None
579 flags = fctx.flags()
579 flags = fctx.flags()
580 return fctx.data(), ('l' in flags, 'x' in flags)
580 return fctx.data(), ('l' in flags, 'x' in flags)
581
581
582 def setfile(self, fname, data, mode, copysource):
582 def setfile(self, fname, data, mode, copysource):
583 if copysource:
583 if copysource:
584 self._checkknown(copysource)
584 self._checkknown(copysource)
585 if data is None:
585 if data is None:
586 data = self.ctx[fname].data()
586 data = self.ctx[fname].data()
587 self.store.setfile(fname, data, mode, copysource)
587 self.store.setfile(fname, data, mode, copysource)
588 self.changed.add(fname)
588 self.changed.add(fname)
589 if copysource:
589 if copysource:
590 self.copied[fname] = copysource
590 self.copied[fname] = copysource
591
591
592 def unlink(self, fname):
592 def unlink(self, fname):
593 self._checkknown(fname)
593 self._checkknown(fname)
594 self.removed.add(fname)
594 self.removed.add(fname)
595
595
596 def exists(self, fname):
596 def exists(self, fname):
597 return fname in self.ctx
597 return fname in self.ctx
598
598
599 def close(self):
599 def close(self):
600 return self.changed | self.removed
600 return self.changed | self.removed
601
601
602 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
602 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
603 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
603 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
604 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
604 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
605 eolmodes = ['strict', 'crlf', 'lf', 'auto']
605 eolmodes = ['strict', 'crlf', 'lf', 'auto']
606
606
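# Illustrative sketch (not part of the original module): how the unified hunk
# header regex above decomposes '@@ -1,5 +1,6 @@'. The helper name is
# hypothetical.
def _unidesc_example():
    m = unidesc.match('@@ -1,5 +1,6 @@')
    # m.groups() == ('1', '5', '1', '6'); the length groups are None when the
    # short '@@ -start +start @@' form is used.
    return m.groups()
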
607 class patchfile(object):
607 class patchfile(object):
608 def __init__(self, ui, gp, backend, store, eolmode='strict'):
608 def __init__(self, ui, gp, backend, store, eolmode='strict'):
609 self.fname = gp.path
609 self.fname = gp.path
610 self.eolmode = eolmode
610 self.eolmode = eolmode
611 self.eol = None
611 self.eol = None
612 self.backend = backend
612 self.backend = backend
613 self.ui = ui
613 self.ui = ui
614 self.lines = []
614 self.lines = []
615 self.exists = False
615 self.exists = False
616 self.missing = True
616 self.missing = True
617 self.mode = gp.mode
617 self.mode = gp.mode
618 self.copysource = gp.oldpath
618 self.copysource = gp.oldpath
619 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
619 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
620 self.remove = gp.op == 'DELETE'
620 self.remove = gp.op == 'DELETE'
621 if self.copysource is None:
621 if self.copysource is None:
622 data, mode = backend.getfile(self.fname)
622 data, mode = backend.getfile(self.fname)
623 else:
623 else:
624 data, mode = store.getfile(self.copysource)[:2]
624 data, mode = store.getfile(self.copysource)[:2]
625 if data is not None:
625 if data is not None:
626 self.exists = self.copysource is None or backend.exists(self.fname)
626 self.exists = self.copysource is None or backend.exists(self.fname)
627 self.missing = False
627 self.missing = False
628 if data:
628 if data:
629 self.lines = mdiff.splitnewlines(data)
629 self.lines = mdiff.splitnewlines(data)
630 if self.mode is None:
630 if self.mode is None:
631 self.mode = mode
631 self.mode = mode
632 if self.lines:
632 if self.lines:
633 # Normalize line endings
633 # Normalize line endings
634 if self.lines[0].endswith('\r\n'):
634 if self.lines[0].endswith('\r\n'):
635 self.eol = '\r\n'
635 self.eol = '\r\n'
636 elif self.lines[0].endswith('\n'):
636 elif self.lines[0].endswith('\n'):
637 self.eol = '\n'
637 self.eol = '\n'
638 if eolmode != 'strict':
638 if eolmode != 'strict':
639 nlines = []
639 nlines = []
640 for l in self.lines:
640 for l in self.lines:
641 if l.endswith('\r\n'):
641 if l.endswith('\r\n'):
642 l = l[:-2] + '\n'
642 l = l[:-2] + '\n'
643 nlines.append(l)
643 nlines.append(l)
644 self.lines = nlines
644 self.lines = nlines
645 else:
645 else:
646 if self.create:
646 if self.create:
647 self.missing = False
647 self.missing = False
648 if self.mode is None:
648 if self.mode is None:
649 self.mode = (False, False)
649 self.mode = (False, False)
650 if self.missing:
650 if self.missing:
651 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
651 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
652
652
653 self.hash = {}
653 self.hash = {}
654 self.dirty = 0
654 self.dirty = 0
655 self.offset = 0
655 self.offset = 0
656 self.skew = 0
656 self.skew = 0
657 self.rej = []
657 self.rej = []
658 self.fileprinted = False
658 self.fileprinted = False
659 self.printfile(False)
659 self.printfile(False)
660 self.hunks = 0
660 self.hunks = 0
661
661
662 def writelines(self, fname, lines, mode):
662 def writelines(self, fname, lines, mode):
663 if self.eolmode == 'auto':
663 if self.eolmode == 'auto':
664 eol = self.eol
664 eol = self.eol
665 elif self.eolmode == 'crlf':
665 elif self.eolmode == 'crlf':
666 eol = '\r\n'
666 eol = '\r\n'
667 else:
667 else:
668 eol = '\n'
668 eol = '\n'
669
669
670 if self.eolmode != 'strict' and eol and eol != '\n':
670 if self.eolmode != 'strict' and eol and eol != '\n':
671 rawlines = []
671 rawlines = []
672 for l in lines:
672 for l in lines:
673 if l and l[-1] == '\n':
673 if l and l[-1] == '\n':
674 l = l[:-1] + eol
674 l = l[:-1] + eol
675 rawlines.append(l)
675 rawlines.append(l)
676 lines = rawlines
676 lines = rawlines
677
677
678 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
678 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
679
679
680 def printfile(self, warn):
680 def printfile(self, warn):
681 if self.fileprinted:
681 if self.fileprinted:
682 return
682 return
683 if warn or self.ui.verbose:
683 if warn or self.ui.verbose:
684 self.fileprinted = True
684 self.fileprinted = True
685 s = _("patching file %s\n") % self.fname
685 s = _("patching file %s\n") % self.fname
686 if warn:
686 if warn:
687 self.ui.warn(s)
687 self.ui.warn(s)
688 else:
688 else:
689 self.ui.note(s)
689 self.ui.note(s)
690
690
691
691
692 def findlines(self, l, linenum):
692 def findlines(self, l, linenum):
693 # looks through the hash and finds candidate lines. The
693 # looks through the hash and finds candidate lines. The
694 # result is a list of line numbers sorted based on distance
694 # result is a list of line numbers sorted based on distance
695 # from linenum
695 # from linenum
696
696
697 cand = self.hash.get(l, [])
697 cand = self.hash.get(l, [])
698 if len(cand) > 1:
698 if len(cand) > 1:
699 # resort our list of potentials forward then back.
699 # resort our list of potentials forward then back.
700 cand.sort(key=lambda x: abs(x - linenum))
700 cand.sort(key=lambda x: abs(x - linenum))
701 return cand
701 return cand
702
702
703 def write_rej(self):
703 def write_rej(self):
704 # our rejects are a little different from patch(1). This always
704 # our rejects are a little different from patch(1). This always
705 # creates rejects in the same form as the original patch. A file
705 # creates rejects in the same form as the original patch. A file
706 # header is inserted so that you can run the reject through patch again
706 # header is inserted so that you can run the reject through patch again
707 # without having to type the filename.
707 # without having to type the filename.
708 if not self.rej:
708 if not self.rej:
709 return
709 return
710 base = os.path.basename(self.fname)
710 base = os.path.basename(self.fname)
711 lines = ["--- %s\n+++ %s\n" % (base, base)]
711 lines = ["--- %s\n+++ %s\n" % (base, base)]
712 for x in self.rej:
712 for x in self.rej:
713 for l in x.hunk:
713 for l in x.hunk:
714 lines.append(l)
714 lines.append(l)
715 if l[-1] != '\n':
715 if l[-1] != '\n':
716 lines.append("\n\ No newline at end of file\n")
716 lines.append("\n\ No newline at end of file\n")
717 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
717 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
718
718
719 def apply(self, h):
719 def apply(self, h):
720 if not h.complete():
720 if not h.complete():
721 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
721 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
722 (h.number, h.desc, len(h.a), h.lena, len(h.b),
722 (h.number, h.desc, len(h.a), h.lena, len(h.b),
723 h.lenb))
723 h.lenb))
724
724
725 self.hunks += 1
725 self.hunks += 1
726
726
727 if self.missing:
727 if self.missing:
728 self.rej.append(h)
728 self.rej.append(h)
729 return -1
729 return -1
730
730
731 if self.exists and self.create:
731 if self.exists and self.create:
732 if self.copysource:
732 if self.copysource:
733 self.ui.warn(_("cannot create %s: destination already "
733 self.ui.warn(_("cannot create %s: destination already "
734 "exists\n") % self.fname)
734 "exists\n") % self.fname)
735 else:
735 else:
736 self.ui.warn(_("file %s already exists\n") % self.fname)
736 self.ui.warn(_("file %s already exists\n") % self.fname)
737 self.rej.append(h)
737 self.rej.append(h)
738 return -1
738 return -1
739
739
740 if isinstance(h, binhunk):
740 if isinstance(h, binhunk):
741 if self.remove:
741 if self.remove:
742 self.backend.unlink(self.fname)
742 self.backend.unlink(self.fname)
743 else:
743 else:
744 l = h.new(self.lines)
744 l = h.new(self.lines)
745 self.lines[:] = l
745 self.lines[:] = l
746 self.offset += len(l)
746 self.offset += len(l)
747 self.dirty = True
747 self.dirty = True
748 return 0
748 return 0
749
749
750 horig = h
750 horig = h
751 if (self.eolmode in ('crlf', 'lf')
751 if (self.eolmode in ('crlf', 'lf')
752 or self.eolmode == 'auto' and self.eol):
752 or self.eolmode == 'auto' and self.eol):
753 # If new eols are going to be normalized, then normalize
753 # If new eols are going to be normalized, then normalize
754 # hunk data before patching. Otherwise, preserve input
754 # hunk data before patching. Otherwise, preserve input
755 # line-endings.
755 # line-endings.
756 h = h.getnormalized()
756 h = h.getnormalized()
757
757
758 # fast case first, no offsets, no fuzz
758 # fast case first, no offsets, no fuzz
759 old, oldstart, new, newstart = h.fuzzit(0, False)
759 old, oldstart, new, newstart = h.fuzzit(0, False)
760 oldstart += self.offset
760 oldstart += self.offset
761 orig_start = oldstart
761 orig_start = oldstart
762 # if there's skew we want to emit the "(offset %d lines)" even
762 # if there's skew we want to emit the "(offset %d lines)" even
763 # when the hunk cleanly applies at start + skew, so skip the
763 # when the hunk cleanly applies at start + skew, so skip the
764 # fast case code
764 # fast case code
765 if (self.skew == 0 and
765 if (self.skew == 0 and
766 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
766 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
767 if self.remove:
767 if self.remove:
768 self.backend.unlink(self.fname)
768 self.backend.unlink(self.fname)
769 else:
769 else:
770 self.lines[oldstart:oldstart + len(old)] = new
770 self.lines[oldstart:oldstart + len(old)] = new
771 self.offset += len(new) - len(old)
771 self.offset += len(new) - len(old)
772 self.dirty = True
772 self.dirty = True
773 return 0
773 return 0
774
774
775 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
775 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
776 self.hash = {}
776 self.hash = {}
777 for x, s in enumerate(self.lines):
777 for x, s in enumerate(self.lines):
778 self.hash.setdefault(s, []).append(x)
778 self.hash.setdefault(s, []).append(x)
779
779
780 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
780 for fuzzlen in xrange(self.ui.configint("patch", "fuzz", 2) + 1):
781 for toponly in [True, False]:
781 for toponly in [True, False]:
782 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
782 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
783 oldstart = oldstart + self.offset + self.skew
783 oldstart = oldstart + self.offset + self.skew
784 oldstart = min(oldstart, len(self.lines))
784 oldstart = min(oldstart, len(self.lines))
785 if old:
785 if old:
786 cand = self.findlines(old[0][1:], oldstart)
786 cand = self.findlines(old[0][1:], oldstart)
787 else:
787 else:
788 # Only adding lines with no or fuzzed context, just
788 # Only adding lines with no or fuzzed context, just
789 # take the skew into account
789 # take the skew into account
790 cand = [oldstart]
790 cand = [oldstart]
791
791
792 for l in cand:
792 for l in cand:
793 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
793 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
794 self.lines[l : l + len(old)] = new
794 self.lines[l : l + len(old)] = new
795 self.offset += len(new) - len(old)
795 self.offset += len(new) - len(old)
796 self.skew = l - orig_start
796 self.skew = l - orig_start
797 self.dirty = True
797 self.dirty = True
798 offset = l - orig_start - fuzzlen
798 offset = l - orig_start - fuzzlen
799 if fuzzlen:
799 if fuzzlen:
800 msg = _("Hunk #%d succeeded at %d "
800 msg = _("Hunk #%d succeeded at %d "
801 "with fuzz %d "
801 "with fuzz %d "
802 "(offset %d lines).\n")
802 "(offset %d lines).\n")
803 self.printfile(True)
803 self.printfile(True)
804 self.ui.warn(msg %
804 self.ui.warn(msg %
805 (h.number, l + 1, fuzzlen, offset))
805 (h.number, l + 1, fuzzlen, offset))
806 else:
806 else:
807 msg = _("Hunk #%d succeeded at %d "
807 msg = _("Hunk #%d succeeded at %d "
808 "(offset %d lines).\n")
808 "(offset %d lines).\n")
809 self.ui.note(msg % (h.number, l + 1, offset))
809 self.ui.note(msg % (h.number, l + 1, offset))
810 return fuzzlen
810 return fuzzlen
811 self.printfile(True)
811 self.printfile(True)
812 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
812 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
813 self.rej.append(horig)
813 self.rej.append(horig)
814 return -1
814 return -1
815
815
816 def close(self):
816 def close(self):
817 if self.dirty:
817 if self.dirty:
818 self.writelines(self.fname, self.lines, self.mode)
818 self.writelines(self.fname, self.lines, self.mode)
819 self.write_rej()
819 self.write_rej()
820 return len(self.rej)
820 return len(self.rej)
821
821
822 class header(object):
822 class header(object):
823 """patch header
823 """patch header
824 """
824 """
825 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
825 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
826 diff_re = re.compile('diff -r .* (.*)$')
826 diff_re = re.compile('diff -r .* (.*)$')
827 allhunks_re = re.compile('(?:index|deleted file) ')
827 allhunks_re = re.compile('(?:index|deleted file) ')
828 pretty_re = re.compile('(?:new file|deleted file) ')
828 pretty_re = re.compile('(?:new file|deleted file) ')
829 special_re = re.compile('(?:index|deleted|copy|rename) ')
829 special_re = re.compile('(?:index|deleted|copy|rename) ')
830 newfile_re = re.compile('(?:new file)')
830 newfile_re = re.compile('(?:new file)')
831
831
832 def __init__(self, header):
832 def __init__(self, header):
833 self.header = header
833 self.header = header
834 self.hunks = []
834 self.hunks = []
835
835
836 def binary(self):
836 def binary(self):
837 return any(h.startswith('index ') for h in self.header)
837 return any(h.startswith('index ') for h in self.header)
838
838
839 def pretty(self, fp):
839 def pretty(self, fp):
840 for h in self.header:
840 for h in self.header:
841 if h.startswith('index '):
841 if h.startswith('index '):
842 fp.write(_('this modifies a binary file (all or nothing)\n'))
842 fp.write(_('this modifies a binary file (all or nothing)\n'))
843 break
843 break
844 if self.pretty_re.match(h):
844 if self.pretty_re.match(h):
845 fp.write(h)
845 fp.write(h)
846 if self.binary():
846 if self.binary():
847 fp.write(_('this is a binary file\n'))
847 fp.write(_('this is a binary file\n'))
848 break
848 break
849 if h.startswith('---'):
849 if h.startswith('---'):
850 fp.write(_('%d hunks, %d lines changed\n') %
850 fp.write(_('%d hunks, %d lines changed\n') %
851 (len(self.hunks),
851 (len(self.hunks),
852 sum([max(h.added, h.removed) for h in self.hunks])))
852 sum([max(h.added, h.removed) for h in self.hunks])))
853 break
853 break
854 fp.write(h)
854 fp.write(h)
855
855
856 def write(self, fp):
856 def write(self, fp):
857 fp.write(''.join(self.header))
857 fp.write(''.join(self.header))
858
858
859 def allhunks(self):
859 def allhunks(self):
860 return any(self.allhunks_re.match(h) for h in self.header)
860 return any(self.allhunks_re.match(h) for h in self.header)
861
861
862 def files(self):
862 def files(self):
863 match = self.diffgit_re.match(self.header[0])
863 match = self.diffgit_re.match(self.header[0])
864 if match:
864 if match:
865 fromfile, tofile = match.groups()
865 fromfile, tofile = match.groups()
866 if fromfile == tofile:
866 if fromfile == tofile:
867 return [fromfile]
867 return [fromfile]
868 return [fromfile, tofile]
868 return [fromfile, tofile]
869 else:
869 else:
870 return self.diff_re.match(self.header[0]).groups()
870 return self.diff_re.match(self.header[0]).groups()
871
871
872 def filename(self):
872 def filename(self):
873 return self.files()[-1]
873 return self.files()[-1]
874
874
875 def __repr__(self):
875 def __repr__(self):
876 return '<header %s>' % (' '.join(map(repr, self.files())))
876 return '<header %s>' % (' '.join(map(repr, self.files())))
877
877
878 def isnewfile(self):
878 def isnewfile(self):
879 return any(self.newfile_re.match(h) for h in self.header)
879 return any(self.newfile_re.match(h) for h in self.header)
880
880
881 def special(self):
881 def special(self):
882 # Special files are shown only at the header level and not at the hunk
882 # Special files are shown only at the header level and not at the hunk
883 # level; for example, a file that has been deleted is a special file.
883 # level; for example, a file that has been deleted is a special file.
884 # The user cannot change the content of the operation: in the case of
884 # The user cannot change the content of the operation: in the case of
885 # a deleted file, they have to take the deletion or leave it; they
885 # a deleted file, they have to take the deletion or leave it; they
886 # cannot take only part of it.
886 # cannot take only part of it.
887 # Newly added files are special if they are empty; they are not special
887 # Newly added files are special if they are empty; they are not special
888 # if they have content, as we want to be able to change it.
888 # if they have content, as we want to be able to change it.
889 nocontent = len(self.header) == 2
889 nocontent = len(self.header) == 2
890 emptynewfile = self.isnewfile() and nocontent
890 emptynewfile = self.isnewfile() and nocontent
891 return emptynewfile or \
891 return emptynewfile or \
892 any(self.special_re.match(h) for h in self.header)
892 any(self.special_re.match(h) for h in self.header)
893
893
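# Illustrative sketch (not part of the original module): a git-style header
# resolves file names through files()/filename(), and the 'rename ...' lines
# make it special(). The helper name and sample file names are hypothetical.
def _header_example():
    h = header(['diff --git a/old.txt b/new.txt\n',
                'rename from old.txt\n',
                'rename to new.txt\n'])
    # h.files() == ['old.txt', 'new.txt'], h.filename() == 'new.txt'
    # h.special() is True because of the rename lines
    return h.files(), h.special()
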
894 class recordhunk(object):
894 class recordhunk(object):
895 """patch hunk
895 """patch hunk
896
896
897 XXX shouldn't we merge this with the other hunk class?
897 XXX shouldn't we merge this with the other hunk class?
898 """
898 """
899 maxcontext = 3
899 maxcontext = 3
900
900
901 def __init__(self, header, fromline, toline, proc, before, hunk, after):
901 def __init__(self, header, fromline, toline, proc, before, hunk, after):
902 def trimcontext(number, lines):
902 def trimcontext(number, lines):
903 delta = len(lines) - self.maxcontext
903 delta = len(lines) - self.maxcontext
904 if False and delta > 0:
904 if False and delta > 0:
905 return number + delta, lines[:self.maxcontext]
905 return number + delta, lines[:self.maxcontext]
906 return number, lines
906 return number, lines
907
907
908 self.header = header
908 self.header = header
909 self.fromline, self.before = trimcontext(fromline, before)
909 self.fromline, self.before = trimcontext(fromline, before)
910 self.toline, self.after = trimcontext(toline, after)
910 self.toline, self.after = trimcontext(toline, after)
911 self.proc = proc
911 self.proc = proc
912 self.hunk = hunk
912 self.hunk = hunk
913 self.added, self.removed = self.countchanges(self.hunk)
913 self.added, self.removed = self.countchanges(self.hunk)
914
914
915 def __eq__(self, v):
915 def __eq__(self, v):
916 if not isinstance(v, recordhunk):
916 if not isinstance(v, recordhunk):
917 return False
917 return False
918
918
919 return ((v.hunk == self.hunk) and
919 return ((v.hunk == self.hunk) and
920 (v.proc == self.proc) and
920 (v.proc == self.proc) and
921 (self.fromline == v.fromline) and
921 (self.fromline == v.fromline) and
922 (self.header.files() == v.header.files()))
922 (self.header.files() == v.header.files()))
923
923
924 def __hash__(self):
924 def __hash__(self):
925 return hash((tuple(self.hunk),
925 return hash((tuple(self.hunk),
926 tuple(self.header.files()),
926 tuple(self.header.files()),
927 self.fromline,
927 self.fromline,
928 self.proc))
928 self.proc))
929
929
930 def countchanges(self, hunk):
930 def countchanges(self, hunk):
931 """hunk -> (n+,n-)"""
931 """hunk -> (n+,n-)"""
932 add = len([h for h in hunk if h[0] == '+'])
932 add = len([h for h in hunk if h[0] == '+'])
933 rem = len([h for h in hunk if h[0] == '-'])
933 rem = len([h for h in hunk if h[0] == '-'])
934 return add, rem
934 return add, rem
935
935
936 def write(self, fp):
936 def write(self, fp):
937 delta = len(self.before) + len(self.after)
937 delta = len(self.before) + len(self.after)
938 if self.after and self.after[-1] == '\\ No newline at end of file\n':
938 if self.after and self.after[-1] == '\\ No newline at end of file\n':
939 delta -= 1
939 delta -= 1
940 fromlen = delta + self.removed
940 fromlen = delta + self.removed
941 tolen = delta + self.added
941 tolen = delta + self.added
942 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
942 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
943 (self.fromline, fromlen, self.toline, tolen,
943 (self.fromline, fromlen, self.toline, tolen,
944 self.proc and (' ' + self.proc)))
944 self.proc and (' ' + self.proc)))
945 fp.write(''.join(self.before + self.hunk + self.after))
945 fp.write(''.join(self.before + self.hunk + self.after))
946
946
947 pretty = write
947 pretty = write
948
948
949 def filename(self):
949 def filename(self):
950 return self.header.filename()
950 return self.header.filename()
951
951
952 def __repr__(self):
952 def __repr__(self):
953 return '<hunk %r@%d>' % (self.filename(), self.fromline)
953 return '<hunk %r@%d>' % (self.filename(), self.fromline)
954
954
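# Illustrative sketch (not part of the original module): countchanges()
# tallies the '+' and '-' lines of a hunk body. The helper name and sample
# lines are hypothetical.
def _recordhunk_example():
    h = header(['diff --git a/f b/f\n'])
    hunklines = ['-old line\n', '+new line\n', '+another line\n']
    rh = recordhunk(h, 1, 1, None, [' context\n'], hunklines, [' context\n'])
    # rh.added == 2 and rh.removed == 1
    return rh.added, rh.removed
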
955 def filterpatch(ui, headers, operation=None):
955 def filterpatch(ui, headers, operation=None):
956 """Interactively filter patch chunks into applied-only chunks"""
956 """Interactively filter patch chunks into applied-only chunks"""
957 if operation is None:
957 if operation is None:
958 operation = _('record')
958 operation = _('record')
959
959
960 def prompt(skipfile, skipall, query, chunk):
960 def prompt(skipfile, skipall, query, chunk):
961 """prompt query, and process base inputs
961 """prompt query, and process base inputs
962
962
963 - y/n for the rest of the file
963 - y/n for the rest of the file
964 - y/n for the rest of the changes
964 - y/n for the rest of the changes
965 - ? (help)
965 - ? (help)
966 - q (quit)
966 - q (quit)
967
967
968 Return True/False and possibly updated skipfile and skipall.
968 Return True/False and possibly updated skipfile and skipall.
969 """
969 """
970 newpatches = None
970 newpatches = None
971 if skipall is not None:
971 if skipall is not None:
972 return skipall, skipfile, skipall, newpatches
972 return skipall, skipfile, skipall, newpatches
973 if skipfile is not None:
973 if skipfile is not None:
974 return skipfile, skipfile, skipall, newpatches
974 return skipfile, skipfile, skipall, newpatches
975 while True:
975 while True:
976 resps = _('[Ynesfdaq?]'
976 resps = _('[Ynesfdaq?]'
977 '$$ &Yes, record this change'
977 '$$ &Yes, record this change'
978 '$$ &No, skip this change'
978 '$$ &No, skip this change'
979 '$$ &Edit this change manually'
979 '$$ &Edit this change manually'
980 '$$ &Skip remaining changes to this file'
980 '$$ &Skip remaining changes to this file'
981 '$$ Record remaining changes to this &file'
981 '$$ Record remaining changes to this &file'
982 '$$ &Done, skip remaining changes and files'
982 '$$ &Done, skip remaining changes and files'
983 '$$ Record &all changes to all remaining files'
983 '$$ Record &all changes to all remaining files'
984 '$$ &Quit, recording no changes'
984 '$$ &Quit, recording no changes'
985 '$$ &? (display help)')
985 '$$ &? (display help)')
986 r = ui.promptchoice("%s %s" % (query, resps))
986 r = ui.promptchoice("%s %s" % (query, resps))
987 ui.write("\n")
987 ui.write("\n")
988 if r == 8: # ?
988 if r == 8: # ?
989 for c, t in ui.extractchoices(resps)[1]:
989 for c, t in ui.extractchoices(resps)[1]:
990 ui.write('%s - %s\n' % (c, t.lower()))
990 ui.write('%s - %s\n' % (c, t.lower()))
991 continue
991 continue
992 elif r == 0: # yes
992 elif r == 0: # yes
993 ret = True
993 ret = True
994 elif r == 1: # no
994 elif r == 1: # no
995 ret = False
995 ret = False
996 elif r == 2: # Edit patch
996 elif r == 2: # Edit patch
997 if chunk is None:
997 if chunk is None:
998 ui.write(_('cannot edit patch for whole file'))
998 ui.write(_('cannot edit patch for whole file'))
999 ui.write("\n")
999 ui.write("\n")
1000 continue
1000 continue
1001 if chunk.header.binary():
1001 if chunk.header.binary():
1002 ui.write(_('cannot edit patch for binary file'))
1002 ui.write(_('cannot edit patch for binary file'))
1003 ui.write("\n")
1003 ui.write("\n")
1004 continue
1004 continue
1005 # Patch comment based on the Git one (based on comment at end of
1005 # Patch comment based on the Git one (based on comment at end of
1006 # https://mercurial-scm.org/wiki/RecordExtension)
1006 # https://mercurial-scm.org/wiki/RecordExtension)
1007 phelp = '---' + _("""
1007 phelp = '---' + _("""
1008 To remove '-' lines, make them ' ' lines (context).
1008 To remove '-' lines, make them ' ' lines (context).
1009 To remove '+' lines, delete them.
1009 To remove '+' lines, delete them.
1010 Lines starting with # will be removed from the patch.
1010 Lines starting with # will be removed from the patch.
1011
1011
1012 If the patch applies cleanly, the edited hunk will immediately be
1012 If the patch applies cleanly, the edited hunk will immediately be
1013 added to the record list. If it does not apply cleanly, a rejects
1013 added to the record list. If it does not apply cleanly, a rejects
1014 file will be generated: you can use that when you try again. If
1014 file will be generated: you can use that when you try again. If
1015 all lines of the hunk are removed, then the edit is aborted and
1015 all lines of the hunk are removed, then the edit is aborted and
1016 the hunk is left unchanged.
1016 the hunk is left unchanged.
1017 """)
1017 """)
1018 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1018 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1019 suffix=".diff", text=True)
1019 suffix=".diff", text=True)
1020 ncpatchfp = None
1020 ncpatchfp = None
1021 try:
1021 try:
1022 # Write the initial patch
1022 # Write the initial patch
1023 f = os.fdopen(patchfd, "w")
1023 f = os.fdopen(patchfd, "w")
1024 chunk.header.write(f)
1024 chunk.header.write(f)
1025 chunk.write(f)
1025 chunk.write(f)
1026 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1026 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1027 f.close()
1027 f.close()
1028 # Start the editor and wait for it to complete
1028 # Start the editor and wait for it to complete
1029 editor = ui.geteditor()
1029 editor = ui.geteditor()
1030 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1030 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1031 environ={'HGUSER': ui.username()})
1031 environ={'HGUSER': ui.username()})
1032 if ret != 0:
1032 if ret != 0:
1033 ui.warn(_("editor exited with exit code %d\n") % ret)
1033 ui.warn(_("editor exited with exit code %d\n") % ret)
1034 continue
1034 continue
1035 # Remove comment lines
1035 # Remove comment lines
1036 patchfp = open(patchfn)
1036 patchfp = open(patchfn)
1037 ncpatchfp = cStringIO.StringIO()
1037 ncpatchfp = cStringIO.StringIO()
1038 for line in patchfp:
1038 for line in patchfp:
1039 if not line.startswith('#'):
1039 if not line.startswith('#'):
1040 ncpatchfp.write(line)
1040 ncpatchfp.write(line)
1041 patchfp.close()
1041 patchfp.close()
1042 ncpatchfp.seek(0)
1042 ncpatchfp.seek(0)
1043 newpatches = parsepatch(ncpatchfp)
1043 newpatches = parsepatch(ncpatchfp)
1044 finally:
1044 finally:
1045 os.unlink(patchfn)
1045 os.unlink(patchfn)
1046 del ncpatchfp
1046 del ncpatchfp
1047 # Signal that the chunk shouldn't be applied as-is, but
1047 # Signal that the chunk shouldn't be applied as-is, but
1048 # provide the new patch to be used instead.
1048 # provide the new patch to be used instead.
1049 ret = False
1049 ret = False
1050 elif r == 3: # Skip
1050 elif r == 3: # Skip
1051 ret = skipfile = False
1051 ret = skipfile = False
1052 elif r == 4: # file (Record remaining)
1052 elif r == 4: # file (Record remaining)
1053 ret = skipfile = True
1053 ret = skipfile = True
1054 elif r == 5: # done, skip remaining
1054 elif r == 5: # done, skip remaining
1055 ret = skipall = False
1055 ret = skipall = False
1056 elif r == 6: # all
1056 elif r == 6: # all
1057 ret = skipall = True
1057 ret = skipall = True
1058 elif r == 7: # quit
1058 elif r == 7: # quit
1059 raise error.Abort(_('user quit'))
1059 raise error.Abort(_('user quit'))
1060 return ret, skipfile, skipall, newpatches
1060 return ret, skipfile, skipall, newpatches
1061
1061
1062 seen = set()
1062 seen = set()
1063 applied = {} # 'filename' -> [] of chunks
1063 applied = {} # 'filename' -> [] of chunks
1064 skipfile, skipall = None, None
1064 skipfile, skipall = None, None
1065 pos, total = 1, sum(len(h.hunks) for h in headers)
1065 pos, total = 1, sum(len(h.hunks) for h in headers)
1066 for h in headers:
1066 for h in headers:
1067 pos += len(h.hunks)
1067 pos += len(h.hunks)
1068 skipfile = None
1068 skipfile = None
1069 fixoffset = 0
1069 fixoffset = 0
1070 hdr = ''.join(h.header)
1070 hdr = ''.join(h.header)
1071 if hdr in seen:
1071 if hdr in seen:
1072 continue
1072 continue
1073 seen.add(hdr)
1073 seen.add(hdr)
1074 if skipall is None:
1074 if skipall is None:
1075 h.pretty(ui)
1075 h.pretty(ui)
1076 msg = (_('examine changes to %s?') %
1076 msg = (_('examine changes to %s?') %
1077 _(' and ').join("'%s'" % f for f in h.files()))
1077 _(' and ').join("'%s'" % f for f in h.files()))
1078 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1078 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1079 if not r:
1079 if not r:
1080 continue
1080 continue
1081 applied[h.filename()] = [h]
1081 applied[h.filename()] = [h]
1082 if h.allhunks():
1082 if h.allhunks():
1083 applied[h.filename()] += h.hunks
1083 applied[h.filename()] += h.hunks
1084 continue
1084 continue
1085 for i, chunk in enumerate(h.hunks):
1085 for i, chunk in enumerate(h.hunks):
1086 if skipfile is None and skipall is None:
1086 if skipfile is None and skipall is None:
1087 chunk.pretty(ui)
1087 chunk.pretty(ui)
1088 if total == 1:
1088 if total == 1:
1089 msg = _("record this change to '%s'?") % chunk.filename()
1089 msg = _("record this change to '%s'?") % chunk.filename()
1090 else:
1090 else:
1091 idx = pos - len(h.hunks) + i
1091 idx = pos - len(h.hunks) + i
1092 msg = _("record change %d/%d to '%s'?") % (idx, total,
1092 msg = _("record change %d/%d to '%s'?") % (idx, total,
1093 chunk.filename())
1093 chunk.filename())
1094 r, skipfile, skipall, newpatches = prompt(skipfile,
1094 r, skipfile, skipall, newpatches = prompt(skipfile,
1095 skipall, msg, chunk)
1095 skipall, msg, chunk)
1096 if r:
1096 if r:
1097 if fixoffset:
1097 if fixoffset:
1098 chunk = copy.copy(chunk)
1098 chunk = copy.copy(chunk)
1099 chunk.toline += fixoffset
1099 chunk.toline += fixoffset
1100 applied[chunk.filename()].append(chunk)
1100 applied[chunk.filename()].append(chunk)
1101 elif newpatches is not None:
1101 elif newpatches is not None:
1102 for newpatch in newpatches:
1102 for newpatch in newpatches:
1103 for newhunk in newpatch.hunks:
1103 for newhunk in newpatch.hunks:
1104 if fixoffset:
1104 if fixoffset:
1105 newhunk.toline += fixoffset
1105 newhunk.toline += fixoffset
1106 applied[newhunk.filename()].append(newhunk)
1106 applied[newhunk.filename()].append(newhunk)
1107 else:
1107 else:
1108 fixoffset += chunk.removed - chunk.added
1108 fixoffset += chunk.removed - chunk.added
1109 return sum([h for h in applied.itervalues()
1109 return (sum([h for h in applied.itervalues()
1110 if h[0].special() or len(h) > 1], [])
1110 if h[0].special() or len(h) > 1], []), {})
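# Illustrative usage sketch (assumption, not part of the original module):
# with the tuple return value above, callers unpack the selected chunks
# together with the (for now empty) dictionary of extra information from the
# chunk selector. The helper name is hypothetical.
def _filterpatch_example(ui, headers):
    chunks, extras = filterpatch(ui, headers)
    touched = set(c.filename() for c in chunks)
    return chunks, extras, touched
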
1111 class hunk(object):
1111 class hunk(object):
1112 def __init__(self, desc, num, lr, context):
1112 def __init__(self, desc, num, lr, context):
1113 self.number = num
1113 self.number = num
1114 self.desc = desc
1114 self.desc = desc
1115 self.hunk = [desc]
1115 self.hunk = [desc]
1116 self.a = []
1116 self.a = []
1117 self.b = []
1117 self.b = []
1118 self.starta = self.lena = None
1118 self.starta = self.lena = None
1119 self.startb = self.lenb = None
1119 self.startb = self.lenb = None
1120 if lr is not None:
1120 if lr is not None:
1121 if context:
1121 if context:
1122 self.read_context_hunk(lr)
1122 self.read_context_hunk(lr)
1123 else:
1123 else:
1124 self.read_unified_hunk(lr)
1124 self.read_unified_hunk(lr)
1125
1125
1126 def getnormalized(self):
1126 def getnormalized(self):
1127 """Return a copy with line endings normalized to LF."""
1127 """Return a copy with line endings normalized to LF."""
1128
1128
1129 def normalize(lines):
1129 def normalize(lines):
1130 nlines = []
1130 nlines = []
1131 for line in lines:
1131 for line in lines:
1132 if line.endswith('\r\n'):
1132 if line.endswith('\r\n'):
1133 line = line[:-2] + '\n'
1133 line = line[:-2] + '\n'
1134 nlines.append(line)
1134 nlines.append(line)
1135 return nlines
1135 return nlines
1136
1136
1137 # Dummy object, it is rebuilt manually
1137 # Dummy object, it is rebuilt manually
1138 nh = hunk(self.desc, self.number, None, None)
1138 nh = hunk(self.desc, self.number, None, None)
1139 nh.number = self.number
1139 nh.number = self.number
1140 nh.desc = self.desc
1140 nh.desc = self.desc
1141 nh.hunk = self.hunk
1141 nh.hunk = self.hunk
1142 nh.a = normalize(self.a)
1142 nh.a = normalize(self.a)
1143 nh.b = normalize(self.b)
1143 nh.b = normalize(self.b)
1144 nh.starta = self.starta
1144 nh.starta = self.starta
1145 nh.startb = self.startb
1145 nh.startb = self.startb
1146 nh.lena = self.lena
1146 nh.lena = self.lena
1147 nh.lenb = self.lenb
1147 nh.lenb = self.lenb
1148 return nh
1148 return nh
1149
1149
1150 def read_unified_hunk(self, lr):
1150 def read_unified_hunk(self, lr):
1151 m = unidesc.match(self.desc)
1151 m = unidesc.match(self.desc)
1152 if not m:
1152 if not m:
1153 raise PatchError(_("bad hunk #%d") % self.number)
1153 raise PatchError(_("bad hunk #%d") % self.number)
1154 self.starta, self.lena, self.startb, self.lenb = m.groups()
1154 self.starta, self.lena, self.startb, self.lenb = m.groups()
1155 if self.lena is None:
1155 if self.lena is None:
1156 self.lena = 1
1156 self.lena = 1
1157 else:
1157 else:
1158 self.lena = int(self.lena)
1158 self.lena = int(self.lena)
1159 if self.lenb is None:
1159 if self.lenb is None:
1160 self.lenb = 1
1160 self.lenb = 1
1161 else:
1161 else:
1162 self.lenb = int(self.lenb)
1162 self.lenb = int(self.lenb)
1163 self.starta = int(self.starta)
1163 self.starta = int(self.starta)
1164 self.startb = int(self.startb)
1164 self.startb = int(self.startb)
1165 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1165 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1166 self.b)
1166 self.b)
1167 # if we hit eof before finishing out the hunk, the last line will
1167 # if we hit eof before finishing out the hunk, the last line will
1168 # be zero length. Let's try to fix it up.
1168 # be zero length. Let's try to fix it up.
1169 while len(self.hunk[-1]) == 0:
1169 while len(self.hunk[-1]) == 0:
1170 del self.hunk[-1]
1170 del self.hunk[-1]
1171 del self.a[-1]
1171 del self.a[-1]
1172 del self.b[-1]
1172 del self.b[-1]
1173 self.lena -= 1
1173 self.lena -= 1
1174 self.lenb -= 1
1174 self.lenb -= 1
1175 self._fixnewline(lr)
1175 self._fixnewline(lr)
1176
1176
1177 def read_context_hunk(self, lr):
1177 def read_context_hunk(self, lr):
1178 self.desc = lr.readline()
1178 self.desc = lr.readline()
1179 m = contextdesc.match(self.desc)
1179 m = contextdesc.match(self.desc)
1180 if not m:
1180 if not m:
1181 raise PatchError(_("bad hunk #%d") % self.number)
1181 raise PatchError(_("bad hunk #%d") % self.number)
1182 self.starta, aend = m.groups()
1182 self.starta, aend = m.groups()
1183 self.starta = int(self.starta)
1183 self.starta = int(self.starta)
1184 if aend is None:
1184 if aend is None:
1185 aend = self.starta
1185 aend = self.starta
1186 self.lena = int(aend) - self.starta
1186 self.lena = int(aend) - self.starta
1187 if self.starta:
1187 if self.starta:
1188 self.lena += 1
1188 self.lena += 1
1189 for x in xrange(self.lena):
1189 for x in xrange(self.lena):
1190 l = lr.readline()
1190 l = lr.readline()
1191 if l.startswith('---'):
1191 if l.startswith('---'):
1192 # lines addition, old block is empty
1192 # lines addition, old block is empty
1193 lr.push(l)
1193 lr.push(l)
1194 break
1194 break
1195 s = l[2:]
1195 s = l[2:]
1196 if l.startswith('- ') or l.startswith('! '):
1196 if l.startswith('- ') or l.startswith('! '):
1197 u = '-' + s
1197 u = '-' + s
1198 elif l.startswith(' '):
1198 elif l.startswith(' '):
1199 u = ' ' + s
1199 u = ' ' + s
1200 else:
1200 else:
1201 raise PatchError(_("bad hunk #%d old text line %d") %
1201 raise PatchError(_("bad hunk #%d old text line %d") %
1202 (self.number, x))
1202 (self.number, x))
1203 self.a.append(u)
1203 self.a.append(u)
1204 self.hunk.append(u)
1204 self.hunk.append(u)
1205
1205
1206 l = lr.readline()
1206 l = lr.readline()
1207 if l.startswith('\ '):
1207 if l.startswith('\ '):
1208 s = self.a[-1][:-1]
1208 s = self.a[-1][:-1]
1209 self.a[-1] = s
1209 self.a[-1] = s
1210 self.hunk[-1] = s
1210 self.hunk[-1] = s
1211 l = lr.readline()
1211 l = lr.readline()
1212 m = contextdesc.match(l)
1212 m = contextdesc.match(l)
1213 if not m:
1213 if not m:
1214 raise PatchError(_("bad hunk #%d") % self.number)
1214 raise PatchError(_("bad hunk #%d") % self.number)
1215 self.startb, bend = m.groups()
1215 self.startb, bend = m.groups()
1216 self.startb = int(self.startb)
1216 self.startb = int(self.startb)
1217 if bend is None:
1217 if bend is None:
1218 bend = self.startb
1218 bend = self.startb
1219 self.lenb = int(bend) - self.startb
1219 self.lenb = int(bend) - self.startb
1220 if self.startb:
1220 if self.startb:
1221 self.lenb += 1
1221 self.lenb += 1
1222 hunki = 1
1222 hunki = 1
1223 for x in xrange(self.lenb):
1223 for x in xrange(self.lenb):
1224 l = lr.readline()
1224 l = lr.readline()
1225 if l.startswith('\ '):
1225 if l.startswith('\ '):
1226 # XXX: the only way to hit this is with an invalid line range.
1226 # XXX: the only way to hit this is with an invalid line range.
1227 # The no-eol marker is not counted in the line range, but some
1227 # The no-eol marker is not counted in the line range, but some
1228 # diff(1) implementations may behave differently.
1228 # diff(1) implementations may behave differently.
1229 s = self.b[-1][:-1]
1229 s = self.b[-1][:-1]
1230 self.b[-1] = s
1230 self.b[-1] = s
1231 self.hunk[hunki - 1] = s
1231 self.hunk[hunki - 1] = s
1232 continue
1232 continue
1233 if not l:
1233 if not l:
1234 # line deletions, new block is empty and we hit EOF
1234 # line deletions, new block is empty and we hit EOF
1235 lr.push(l)
1235 lr.push(l)
1236 break
1236 break
1237 s = l[2:]
1237 s = l[2:]
1238 if l.startswith('+ ') or l.startswith('! '):
1238 if l.startswith('+ ') or l.startswith('! '):
1239 u = '+' + s
1239 u = '+' + s
1240 elif l.startswith(' '):
1240 elif l.startswith(' '):
1241 u = ' ' + s
1241 u = ' ' + s
1242 elif len(self.b) == 0:
1242 elif len(self.b) == 0:
1243 # line deletions, new block is empty
1243 # line deletions, new block is empty
1244 lr.push(l)
1244 lr.push(l)
1245 break
1245 break
1246 else:
1246 else:
1247 raise PatchError(_("bad hunk #%d old text line %d") %
1247 raise PatchError(_("bad hunk #%d old text line %d") %
1248 (self.number, x))
1248 (self.number, x))
1249 self.b.append(s)
1249 self.b.append(s)
1250 while True:
1250 while True:
1251 if hunki >= len(self.hunk):
1251 if hunki >= len(self.hunk):
1252 h = ""
1252 h = ""
1253 else:
1253 else:
1254 h = self.hunk[hunki]
1254 h = self.hunk[hunki]
1255 hunki += 1
1255 hunki += 1
1256 if h == u:
1256 if h == u:
1257 break
1257 break
1258 elif h.startswith('-'):
1258 elif h.startswith('-'):
1259 continue
1259 continue
1260 else:
1260 else:
1261 self.hunk.insert(hunki - 1, u)
1261 self.hunk.insert(hunki - 1, u)
1262 break
1262 break
1263
1263
1264 if not self.a:
1264 if not self.a:
1265 # this happens when lines were only added to the hunk
1265 # this happens when lines were only added to the hunk
1266 for x in self.hunk:
1266 for x in self.hunk:
1267 if x.startswith('-') or x.startswith(' '):
1267 if x.startswith('-') or x.startswith(' '):
1268 self.a.append(x)
1268 self.a.append(x)
1269 if not self.b:
1269 if not self.b:
1270 # this happens when lines were only deleted from the hunk
1270 # this happens when lines were only deleted from the hunk
1271 for x in self.hunk:
1271 for x in self.hunk:
1272 if x.startswith('+') or x.startswith(' '):
1272 if x.startswith('+') or x.startswith(' '):
1273 self.b.append(x[1:])
1273 self.b.append(x[1:])
1274 # @@ -start,len +start,len @@
1274 # @@ -start,len +start,len @@
1275 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1275 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1276 self.startb, self.lenb)
1276 self.startb, self.lenb)
1277 self.hunk[0] = self.desc
1277 self.hunk[0] = self.desc
1278 self._fixnewline(lr)
1278 self._fixnewline(lr)
1279
1279
1280 def _fixnewline(self, lr):
1280 def _fixnewline(self, lr):
1281 l = lr.readline()
1281 l = lr.readline()
1282 if l.startswith('\ '):
1282 if l.startswith('\ '):
1283 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1283 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1284 else:
1284 else:
1285 lr.push(l)
1285 lr.push(l)
1286
1286
1287 def complete(self):
1287 def complete(self):
1288 return len(self.a) == self.lena and len(self.b) == self.lenb
1288 return len(self.a) == self.lena and len(self.b) == self.lenb
1289
1289
1290 def _fuzzit(self, old, new, fuzz, toponly):
1290 def _fuzzit(self, old, new, fuzz, toponly):
1291 # this removes context lines from the top and bottom of the 'old' and
1291 # this removes context lines from the top and bottom of the 'old' and
1292 # 'new' lists. It checks the hunk to make sure only context lines are
1292 # 'new' lists. It checks the hunk to make sure only context lines are
1293 # removed, then returns the shortened lists and the size of the top trim.
1293 # removed, then returns the shortened lists and the size of the top trim.
1294 fuzz = min(fuzz, len(old))
1294 fuzz = min(fuzz, len(old))
1295 if fuzz:
1295 if fuzz:
1296 top = 0
1296 top = 0
1297 bot = 0
1297 bot = 0
1298 hlen = len(self.hunk)
1298 hlen = len(self.hunk)
1299 for x in xrange(hlen - 1):
1299 for x in xrange(hlen - 1):
1300 # the hunk starts with the @@ line, so use x+1
1300 # the hunk starts with the @@ line, so use x+1
1301 if self.hunk[x + 1][0] == ' ':
1301 if self.hunk[x + 1][0] == ' ':
1302 top += 1
1302 top += 1
1303 else:
1303 else:
1304 break
1304 break
1305 if not toponly:
1305 if not toponly:
1306 for x in xrange(hlen - 1):
1306 for x in xrange(hlen - 1):
1307 if self.hunk[hlen - bot - 1][0] == ' ':
1307 if self.hunk[hlen - bot - 1][0] == ' ':
1308 bot += 1
1308 bot += 1
1309 else:
1309 else:
1310 break
1310 break
1311
1311
1312 bot = min(fuzz, bot)
1312 bot = min(fuzz, bot)
1313 top = min(fuzz, top)
1313 top = min(fuzz, top)
1314 return old[top:len(old) - bot], new[top:len(new) - bot], top
1314 return old[top:len(old) - bot], new[top:len(new) - bot], top
1315 return old, new, 0
1315 return old, new, 0
1316
1316
1317 def fuzzit(self, fuzz, toponly):
1317 def fuzzit(self, fuzz, toponly):
1318 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1318 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1319 oldstart = self.starta + top
1319 oldstart = self.starta + top
1320 newstart = self.startb + top
1320 newstart = self.startb + top
1321 # zero length hunk ranges already have their start decremented
1321 # zero length hunk ranges already have their start decremented
1322 if self.lena and oldstart > 0:
1322 if self.lena and oldstart > 0:
1323 oldstart -= 1
1323 oldstart -= 1
1324 if self.lenb and newstart > 0:
1324 if self.lenb and newstart > 0:
1325 newstart -= 1
1325 newstart -= 1
1326 return old, oldstart, new, newstart
1326 return old, oldstart, new, newstart
1327
1327
1328 class binhunk(object):
1328 class binhunk(object):
1329 'A binary patch file.'
1329 'A binary patch file.'
1330 def __init__(self, lr, fname):
1330 def __init__(self, lr, fname):
1331 self.text = None
1331 self.text = None
1332 self.delta = False
1332 self.delta = False
1333 self.hunk = ['GIT binary patch\n']
1333 self.hunk = ['GIT binary patch\n']
1334 self._fname = fname
1334 self._fname = fname
1335 self._read(lr)
1335 self._read(lr)
1336
1336
1337 def complete(self):
1337 def complete(self):
1338 return self.text is not None
1338 return self.text is not None
1339
1339
1340 def new(self, lines):
1340 def new(self, lines):
1341 if self.delta:
1341 if self.delta:
1342 return [applybindelta(self.text, ''.join(lines))]
1342 return [applybindelta(self.text, ''.join(lines))]
1343 return [self.text]
1343 return [self.text]
1344
1344
1345 def _read(self, lr):
1345 def _read(self, lr):
1346 def getline(lr, hunk):
1346 def getline(lr, hunk):
1347 l = lr.readline()
1347 l = lr.readline()
1348 hunk.append(l)
1348 hunk.append(l)
1349 return l.rstrip('\r\n')
1349 return l.rstrip('\r\n')
1350
1350
1351 size = 0
1351 size = 0
1352 while True:
1352 while True:
1353 line = getline(lr, self.hunk)
1353 line = getline(lr, self.hunk)
1354 if not line:
1354 if not line:
1355 raise PatchError(_('could not extract "%s" binary data')
1355 raise PatchError(_('could not extract "%s" binary data')
1356 % self._fname)
1356 % self._fname)
1357 if line.startswith('literal '):
1357 if line.startswith('literal '):
1358 size = int(line[8:].rstrip())
1358 size = int(line[8:].rstrip())
1359 break
1359 break
1360 if line.startswith('delta '):
1360 if line.startswith('delta '):
1361 size = int(line[6:].rstrip())
1361 size = int(line[6:].rstrip())
1362 self.delta = True
1362 self.delta = True
1363 break
1363 break
1364 dec = []
1364 dec = []
1365 line = getline(lr, self.hunk)
1365 line = getline(lr, self.hunk)
1366 while len(line) > 1:
1366 while len(line) > 1:
1367 l = line[0]
1367 l = line[0]
1368 if l <= 'Z' and l >= 'A':
1368 if l <= 'Z' and l >= 'A':
1369 l = ord(l) - ord('A') + 1
1369 l = ord(l) - ord('A') + 1
1370 else:
1370 else:
1371 l = ord(l) - ord('a') + 27
1371 l = ord(l) - ord('a') + 27
1372 try:
1372 try:
1373 dec.append(base85.b85decode(line[1:])[:l])
1373 dec.append(base85.b85decode(line[1:])[:l])
1374 except ValueError as e:
1374 except ValueError as e:
1375 raise PatchError(_('could not decode "%s" binary patch: %s')
1375 raise PatchError(_('could not decode "%s" binary patch: %s')
1376 % (self._fname, str(e)))
1376 % (self._fname, str(e)))
1377 line = getline(lr, self.hunk)
1377 line = getline(lr, self.hunk)
1378 text = zlib.decompress(''.join(dec))
1378 text = zlib.decompress(''.join(dec))
1379 if len(text) != size:
1379 if len(text) != size:
1380 raise PatchError(_('"%s" length is %d bytes, should be %d')
1380 raise PatchError(_('"%s" length is %d bytes, should be %d')
1381 % (self._fname, len(text), size))
1381 % (self._fname, len(text), size))
1382 self.text = text
1382 self.text = text
1383
1383
1384 def parsefilename(str):
1384 def parsefilename(str):
1385 # --- filename \t|space stuff
1385 # --- filename \t|space stuff
1386 s = str[4:].rstrip('\r\n')
1386 s = str[4:].rstrip('\r\n')
1387 i = s.find('\t')
1387 i = s.find('\t')
1388 if i < 0:
1388 if i < 0:
1389 i = s.find(' ')
1389 i = s.find(' ')
1390 if i < 0:
1390 if i < 0:
1391 return s
1391 return s
1392 return s[:i]
1392 return s[:i]
1393
1393
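A quick illustration of parsefilename above: it drops the four-character '--- '/'+++ ' marker and keeps everything up to the first tab or space (the timestamp is made up):

    parsefilename('--- a/foo.c\t2015-01-01 10:00:00')   # -> 'a/foo.c'
    parsefilename('+++ b/foo.c')                         # -> 'b/foo.c'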
1394 def reversehunks(hunks):
1394 def reversehunks(hunks):
1395 '''reverse the signs in the hunks given as argument
1395 '''reverse the signs in the hunks given as argument
1396
1396
1397 This function operates on hunks coming out of patch.filterpatch, that is,
1397 This function operates on hunks coming out of patch.filterpatch, that is,
1398 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1398 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1399
1399
1400 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1400 >>> rawpatch = """diff --git a/folder1/g b/folder1/g
1401 ... --- a/folder1/g
1401 ... --- a/folder1/g
1402 ... +++ b/folder1/g
1402 ... +++ b/folder1/g
1403 ... @@ -1,7 +1,7 @@
1403 ... @@ -1,7 +1,7 @@
1404 ... +firstline
1404 ... +firstline
1405 ... c
1405 ... c
1406 ... 1
1406 ... 1
1407 ... 2
1407 ... 2
1408 ... + 3
1408 ... + 3
1409 ... -4
1409 ... -4
1410 ... 5
1410 ... 5
1411 ... d
1411 ... d
1412 ... +lastline"""
1412 ... +lastline"""
1413 >>> hunks = parsepatch(rawpatch)
1413 >>> hunks = parsepatch(rawpatch)
1414 >>> hunkscomingfromfilterpatch = []
1414 >>> hunkscomingfromfilterpatch = []
1415 >>> for h in hunks:
1415 >>> for h in hunks:
1416 ... hunkscomingfromfilterpatch.append(h)
1416 ... hunkscomingfromfilterpatch.append(h)
1417 ... hunkscomingfromfilterpatch.extend(h.hunks)
1417 ... hunkscomingfromfilterpatch.extend(h.hunks)
1418
1418
1419 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1419 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1420 >>> fp = cStringIO.StringIO()
1420 >>> fp = cStringIO.StringIO()
1421 >>> for c in reversedhunks:
1421 >>> for c in reversedhunks:
1422 ... c.write(fp)
1422 ... c.write(fp)
1423 >>> fp.seek(0)
1423 >>> fp.seek(0)
1424 >>> reversedpatch = fp.read()
1424 >>> reversedpatch = fp.read()
1425 >>> print reversedpatch
1425 >>> print reversedpatch
1426 diff --git a/folder1/g b/folder1/g
1426 diff --git a/folder1/g b/folder1/g
1427 --- a/folder1/g
1427 --- a/folder1/g
1428 +++ b/folder1/g
1428 +++ b/folder1/g
1429 @@ -1,4 +1,3 @@
1429 @@ -1,4 +1,3 @@
1430 -firstline
1430 -firstline
1431 c
1431 c
1432 1
1432 1
1433 2
1433 2
1434 @@ -1,6 +2,6 @@
1434 @@ -1,6 +2,6 @@
1435 c
1435 c
1436 1
1436 1
1437 2
1437 2
1438 - 3
1438 - 3
1439 +4
1439 +4
1440 5
1440 5
1441 d
1441 d
1442 @@ -5,3 +6,2 @@
1442 @@ -5,3 +6,2 @@
1443 5
1443 5
1444 d
1444 d
1445 -lastline
1445 -lastline
1446
1446
1447 '''
1447 '''
1448
1448
1449 import crecord as crecordmod
1449 import crecord as crecordmod
1450 newhunks = []
1450 newhunks = []
1451 for c in hunks:
1451 for c in hunks:
1452 if isinstance(c, crecordmod.uihunk):
1452 if isinstance(c, crecordmod.uihunk):
1453 # curses hunks encapsulate the record hunk in _hunk
1453 # curses hunks encapsulate the record hunk in _hunk
1454 c = c._hunk
1454 c = c._hunk
1455 if isinstance(c, recordhunk):
1455 if isinstance(c, recordhunk):
1456 for j, line in enumerate(c.hunk):
1456 for j, line in enumerate(c.hunk):
1457 if line.startswith("-"):
1457 if line.startswith("-"):
1458 c.hunk[j] = "+" + c.hunk[j][1:]
1458 c.hunk[j] = "+" + c.hunk[j][1:]
1459 elif line.startswith("+"):
1459 elif line.startswith("+"):
1460 c.hunk[j] = "-" + c.hunk[j][1:]
1460 c.hunk[j] = "-" + c.hunk[j][1:]
1461 c.added, c.removed = c.removed, c.added
1461 c.added, c.removed = c.removed, c.added
1462 newhunks.append(c)
1462 newhunks.append(c)
1463 return newhunks
1463 return newhunks
1464
1464
1465 def parsepatch(originalchunks):
1465 def parsepatch(originalchunks):
1466 """patch -> [] of headers -> [] of hunks """
1466 """patch -> [] of headers -> [] of hunks """
1467 class parser(object):
1467 class parser(object):
1468 """patch parsing state machine"""
1468 """patch parsing state machine"""
1469 def __init__(self):
1469 def __init__(self):
1470 self.fromline = 0
1470 self.fromline = 0
1471 self.toline = 0
1471 self.toline = 0
1472 self.proc = ''
1472 self.proc = ''
1473 self.header = None
1473 self.header = None
1474 self.context = []
1474 self.context = []
1475 self.before = []
1475 self.before = []
1476 self.hunk = []
1476 self.hunk = []
1477 self.headers = []
1477 self.headers = []
1478
1478
1479 def addrange(self, limits):
1479 def addrange(self, limits):
1480 fromstart, fromend, tostart, toend, proc = limits
1480 fromstart, fromend, tostart, toend, proc = limits
1481 self.fromline = int(fromstart)
1481 self.fromline = int(fromstart)
1482 self.toline = int(tostart)
1482 self.toline = int(tostart)
1483 self.proc = proc
1483 self.proc = proc
1484
1484
1485 def addcontext(self, context):
1485 def addcontext(self, context):
1486 if self.hunk:
1486 if self.hunk:
1487 h = recordhunk(self.header, self.fromline, self.toline,
1487 h = recordhunk(self.header, self.fromline, self.toline,
1488 self.proc, self.before, self.hunk, context)
1488 self.proc, self.before, self.hunk, context)
1489 self.header.hunks.append(h)
1489 self.header.hunks.append(h)
1490 self.fromline += len(self.before) + h.removed
1490 self.fromline += len(self.before) + h.removed
1491 self.toline += len(self.before) + h.added
1491 self.toline += len(self.before) + h.added
1492 self.before = []
1492 self.before = []
1493 self.hunk = []
1493 self.hunk = []
1494 self.proc = ''
1494 self.proc = ''
1495 self.context = context
1495 self.context = context
1496
1496
1497 def addhunk(self, hunk):
1497 def addhunk(self, hunk):
1498 if self.context:
1498 if self.context:
1499 self.before = self.context
1499 self.before = self.context
1500 self.context = []
1500 self.context = []
1501 self.hunk = hunk
1501 self.hunk = hunk
1502
1502
1503 def newfile(self, hdr):
1503 def newfile(self, hdr):
1504 self.addcontext([])
1504 self.addcontext([])
1505 h = header(hdr)
1505 h = header(hdr)
1506 self.headers.append(h)
1506 self.headers.append(h)
1507 self.header = h
1507 self.header = h
1508
1508
1509 def addother(self, line):
1509 def addother(self, line):
1510 pass # 'other' lines are ignored
1510 pass # 'other' lines are ignored
1511
1511
1512 def finished(self):
1512 def finished(self):
1513 self.addcontext([])
1513 self.addcontext([])
1514 return self.headers
1514 return self.headers
1515
1515
1516 transitions = {
1516 transitions = {
1517 'file': {'context': addcontext,
1517 'file': {'context': addcontext,
1518 'file': newfile,
1518 'file': newfile,
1519 'hunk': addhunk,
1519 'hunk': addhunk,
1520 'range': addrange},
1520 'range': addrange},
1521 'context': {'file': newfile,
1521 'context': {'file': newfile,
1522 'hunk': addhunk,
1522 'hunk': addhunk,
1523 'range': addrange,
1523 'range': addrange,
1524 'other': addother},
1524 'other': addother},
1525 'hunk': {'context': addcontext,
1525 'hunk': {'context': addcontext,
1526 'file': newfile,
1526 'file': newfile,
1527 'range': addrange},
1527 'range': addrange},
1528 'range': {'context': addcontext,
1528 'range': {'context': addcontext,
1529 'hunk': addhunk},
1529 'hunk': addhunk},
1530 'other': {'other': addother},
1530 'other': {'other': addother},
1531 }
1531 }
1532
1532
1533 p = parser()
1533 p = parser()
1534 fp = cStringIO.StringIO()
1534 fp = cStringIO.StringIO()
1535 fp.write(''.join(originalchunks))
1535 fp.write(''.join(originalchunks))
1536 fp.seek(0)
1536 fp.seek(0)
1537
1537
1538 state = 'context'
1538 state = 'context'
1539 for newstate, data in scanpatch(fp):
1539 for newstate, data in scanpatch(fp):
1540 try:
1540 try:
1541 p.transitions[state][newstate](p, data)
1541 p.transitions[state][newstate](p, data)
1542 except KeyError:
1542 except KeyError:
1543 raise PatchError('unhandled transition: %s -> %s' %
1543 raise PatchError('unhandled transition: %s -> %s' %
1544 (state, newstate))
1544 (state, newstate))
1545 state = newstate
1545 state = newstate
1546 del fp
1546 del fp
1547 return p.finished()
1547 return p.finished()
1548
1548
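A usage sketch for the parser above, in the spirit of the doctest in reversehunks (the diff text is made up):

    rawpatch = ("diff --git a/f b/f\n"
                "--- a/f\n"
                "+++ b/f\n"
                "@@ -1,1 +1,2 @@\n"
                " unchanged\n"
                "+added\n")
    for hdr in parsepatch(rawpatch):
        touched = hdr.files()               # ['f'] for this header
        for h in hdr.hunks:
            counts = (h.added, h.removed)   # (1, 0) for the hunk above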
1549 def pathtransform(path, strip, prefix):
1549 def pathtransform(path, strip, prefix):
1550 '''turn a path from a patch into a path suitable for the repository
1550 '''turn a path from a patch into a path suitable for the repository
1551
1551
1552 prefix, if not empty, is expected to be normalized with a / at the end.
1552 prefix, if not empty, is expected to be normalized with a / at the end.
1553
1553
1554 Returns (stripped components, path in repository).
1554 Returns (stripped components, path in repository).
1555
1555
1556 >>> pathtransform('a/b/c', 0, '')
1556 >>> pathtransform('a/b/c', 0, '')
1557 ('', 'a/b/c')
1557 ('', 'a/b/c')
1558 >>> pathtransform(' a/b/c ', 0, '')
1558 >>> pathtransform(' a/b/c ', 0, '')
1559 ('', ' a/b/c')
1559 ('', ' a/b/c')
1560 >>> pathtransform(' a/b/c ', 2, '')
1560 >>> pathtransform(' a/b/c ', 2, '')
1561 ('a/b/', 'c')
1561 ('a/b/', 'c')
1562 >>> pathtransform('a/b/c', 0, 'd/e/')
1562 >>> pathtransform('a/b/c', 0, 'd/e/')
1563 ('', 'd/e/a/b/c')
1563 ('', 'd/e/a/b/c')
1564 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1564 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1565 ('a//b/', 'd/e/c')
1565 ('a//b/', 'd/e/c')
1566 >>> pathtransform('a/b/c', 3, '')
1566 >>> pathtransform('a/b/c', 3, '')
1567 Traceback (most recent call last):
1567 Traceback (most recent call last):
1568 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1568 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1569 '''
1569 '''
1570 pathlen = len(path)
1570 pathlen = len(path)
1571 i = 0
1571 i = 0
1572 if strip == 0:
1572 if strip == 0:
1573 return '', prefix + path.rstrip()
1573 return '', prefix + path.rstrip()
1574 count = strip
1574 count = strip
1575 while count > 0:
1575 while count > 0:
1576 i = path.find('/', i)
1576 i = path.find('/', i)
1577 if i == -1:
1577 if i == -1:
1578 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1578 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1579 (count, strip, path))
1579 (count, strip, path))
1580 i += 1
1580 i += 1
1581 # consume '//' in the path
1581 # consume '//' in the path
1582 while i < pathlen - 1 and path[i] == '/':
1582 while i < pathlen - 1 and path[i] == '/':
1583 i += 1
1583 i += 1
1584 count -= 1
1584 count -= 1
1585 return path[:i].lstrip(), prefix + path[i:].rstrip()
1585 return path[:i].lstrip(), prefix + path[i:].rstrip()
1586
1586
1587 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1587 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1588 nulla = afile_orig == "/dev/null"
1588 nulla = afile_orig == "/dev/null"
1589 nullb = bfile_orig == "/dev/null"
1589 nullb = bfile_orig == "/dev/null"
1590 create = nulla and hunk.starta == 0 and hunk.lena == 0
1590 create = nulla and hunk.starta == 0 and hunk.lena == 0
1591 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1591 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1592 abase, afile = pathtransform(afile_orig, strip, prefix)
1592 abase, afile = pathtransform(afile_orig, strip, prefix)
1593 gooda = not nulla and backend.exists(afile)
1593 gooda = not nulla and backend.exists(afile)
1594 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1594 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1595 if afile == bfile:
1595 if afile == bfile:
1596 goodb = gooda
1596 goodb = gooda
1597 else:
1597 else:
1598 goodb = not nullb and backend.exists(bfile)
1598 goodb = not nullb and backend.exists(bfile)
1599 missing = not goodb and not gooda and not create
1599 missing = not goodb and not gooda and not create
1600
1600
1601 # some diff programs apparently produce patches where the afile is
1601 # some diff programs apparently produce patches where the afile is
1602 # not /dev/null, but afile starts with bfile
1602 # not /dev/null, but afile starts with bfile
1603 abasedir = afile[:afile.rfind('/') + 1]
1603 abasedir = afile[:afile.rfind('/') + 1]
1604 bbasedir = bfile[:bfile.rfind('/') + 1]
1604 bbasedir = bfile[:bfile.rfind('/') + 1]
1605 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1605 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1606 and hunk.starta == 0 and hunk.lena == 0):
1606 and hunk.starta == 0 and hunk.lena == 0):
1607 create = True
1607 create = True
1608 missing = False
1608 missing = False
1609
1609
1610 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1610 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1611 # diff is between a file and its backup. In this case, the original
1611 # diff is between a file and its backup. In this case, the original
1612 # file should be patched (see original mpatch code).
1612 # file should be patched (see original mpatch code).
1613 isbackup = (abase == bbase and bfile.startswith(afile))
1613 isbackup = (abase == bbase and bfile.startswith(afile))
1614 fname = None
1614 fname = None
1615 if not missing:
1615 if not missing:
1616 if gooda and goodb:
1616 if gooda and goodb:
1617 if isbackup:
1617 if isbackup:
1618 fname = afile
1618 fname = afile
1619 else:
1619 else:
1620 fname = bfile
1620 fname = bfile
1621 elif gooda:
1621 elif gooda:
1622 fname = afile
1622 fname = afile
1623
1623
1624 if not fname:
1624 if not fname:
1625 if not nullb:
1625 if not nullb:
1626 if isbackup:
1626 if isbackup:
1627 fname = afile
1627 fname = afile
1628 else:
1628 else:
1629 fname = bfile
1629 fname = bfile
1630 elif not nulla:
1630 elif not nulla:
1631 fname = afile
1631 fname = afile
1632 else:
1632 else:
1633 raise PatchError(_("undefined source and destination files"))
1633 raise PatchError(_("undefined source and destination files"))
1634
1634
1635 gp = patchmeta(fname)
1635 gp = patchmeta(fname)
1636 if create:
1636 if create:
1637 gp.op = 'ADD'
1637 gp.op = 'ADD'
1638 elif remove:
1638 elif remove:
1639 gp.op = 'DELETE'
1639 gp.op = 'DELETE'
1640 return gp
1640 return gp
1641
1641
1642 def scanpatch(fp):
1642 def scanpatch(fp):
1643 """like patch.iterhunks, but yield different events
1643 """like patch.iterhunks, but yield different events
1644
1644
1645 - ('file', [header_lines + fromfile + tofile])
1645 - ('file', [header_lines + fromfile + tofile])
1646 - ('context', [context_lines])
1646 - ('context', [context_lines])
1647 - ('hunk', [hunk_lines])
1647 - ('hunk', [hunk_lines])
1648 - ('range', (-start,len, +start,len, proc))
1648 - ('range', (-start,len, +start,len, proc))
1649 """
1649 """
1650 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1650 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1651 lr = linereader(fp)
1651 lr = linereader(fp)
1652
1652
1653 def scanwhile(first, p):
1653 def scanwhile(first, p):
1654 """scan lr while predicate holds"""
1654 """scan lr while predicate holds"""
1655 lines = [first]
1655 lines = [first]
1656 while True:
1656 while True:
1657 line = lr.readline()
1657 line = lr.readline()
1658 if not line:
1658 if not line:
1659 break
1659 break
1660 if p(line):
1660 if p(line):
1661 lines.append(line)
1661 lines.append(line)
1662 else:
1662 else:
1663 lr.push(line)
1663 lr.push(line)
1664 break
1664 break
1665 return lines
1665 return lines
1666
1666
1667 while True:
1667 while True:
1668 line = lr.readline()
1668 line = lr.readline()
1669 if not line:
1669 if not line:
1670 break
1670 break
1671 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1671 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1672 def notheader(line):
1672 def notheader(line):
1673 s = line.split(None, 1)
1673 s = line.split(None, 1)
1674 return not s or s[0] not in ('---', 'diff')
1674 return not s or s[0] not in ('---', 'diff')
1675 header = scanwhile(line, notheader)
1675 header = scanwhile(line, notheader)
1676 fromfile = lr.readline()
1676 fromfile = lr.readline()
1677 if fromfile.startswith('---'):
1677 if fromfile.startswith('---'):
1678 tofile = lr.readline()
1678 tofile = lr.readline()
1679 header += [fromfile, tofile]
1679 header += [fromfile, tofile]
1680 else:
1680 else:
1681 lr.push(fromfile)
1681 lr.push(fromfile)
1682 yield 'file', header
1682 yield 'file', header
1683 elif line[0] == ' ':
1683 elif line[0] == ' ':
1684 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1684 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1685 elif line[0] in '-+':
1685 elif line[0] in '-+':
1686 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1686 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1687 else:
1687 else:
1688 m = lines_re.match(line)
1688 m = lines_re.match(line)
1689 if m:
1689 if m:
1690 yield 'range', m.groups()
1690 yield 'range', m.groups()
1691 else:
1691 else:
1692 yield 'other', line
1692 yield 'other', line
1693
1693
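The event stream above is easiest to see on a tiny input; a sketch using cStringIO, which this module already imports:

    fp = cStringIO.StringIO("diff --git a/f b/f\n"
                            "--- a/f\n"
                            "+++ b/f\n"
                            "@@ -1,1 +1,1 @@\n"
                            "-old\n"
                            "+new\n")
    kinds = [kind for kind, data in scanpatch(fp)]
    # kinds == ['file', 'range', 'hunk']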
1694 def scangitpatch(lr, firstline):
1694 def scangitpatch(lr, firstline):
1695 """
1695 """
1696 Git patches can emit:
1696 Git patches can emit:
1697 - rename a to b
1697 - rename a to b
1698 - change b
1698 - change b
1699 - copy a to c
1699 - copy a to c
1700 - change c
1700 - change c
1701
1701
1702 We cannot apply this sequence as-is: the renamed 'a' could not be
1702 We cannot apply this sequence as-is: the renamed 'a' could not be
1703 found, because it would already have been renamed. Nor can we copy
1703 found, because it would already have been renamed. Nor can we copy
1704 from 'b' instead because 'b' would have been changed already. So
1704 from 'b' instead because 'b' would have been changed already. So
1705 we scan the git patch for copy and rename commands so we can
1705 we scan the git patch for copy and rename commands so we can
1706 perform the copies ahead of time.
1706 perform the copies ahead of time.
1707 """
1707 """
1708 pos = 0
1708 pos = 0
1709 try:
1709 try:
1710 pos = lr.fp.tell()
1710 pos = lr.fp.tell()
1711 fp = lr.fp
1711 fp = lr.fp
1712 except IOError:
1712 except IOError:
1713 fp = cStringIO.StringIO(lr.fp.read())
1713 fp = cStringIO.StringIO(lr.fp.read())
1714 gitlr = linereader(fp)
1714 gitlr = linereader(fp)
1715 gitlr.push(firstline)
1715 gitlr.push(firstline)
1716 gitpatches = readgitpatch(gitlr)
1716 gitpatches = readgitpatch(gitlr)
1717 fp.seek(pos)
1717 fp.seek(pos)
1718 return gitpatches
1718 return gitpatches
1719
1719
1720 def iterhunks(fp):
1720 def iterhunks(fp):
1721 """Read a patch and yield the following events:
1721 """Read a patch and yield the following events:
1722 - ("file", afile, bfile, firsthunk): select a new target file.
1722 - ("file", afile, bfile, firsthunk): select a new target file.
1723 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1723 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1724 "file" event.
1724 "file" event.
1725 - ("git", gitchanges): current diff is in git format, gitchanges
1725 - ("git", gitchanges): current diff is in git format, gitchanges
1726 maps filenames to gitpatch records. Unique event.
1726 maps filenames to gitpatch records. Unique event.
1727 """
1727 """
1728 afile = ""
1728 afile = ""
1729 bfile = ""
1729 bfile = ""
1730 state = None
1730 state = None
1731 hunknum = 0
1731 hunknum = 0
1732 emitfile = newfile = False
1732 emitfile = newfile = False
1733 gitpatches = None
1733 gitpatches = None
1734
1734
1735 # our states
1735 # our states
1736 BFILE = 1
1736 BFILE = 1
1737 context = None
1737 context = None
1738 lr = linereader(fp)
1738 lr = linereader(fp)
1739
1739
1740 while True:
1740 while True:
1741 x = lr.readline()
1741 x = lr.readline()
1742 if not x:
1742 if not x:
1743 break
1743 break
1744 if state == BFILE and (
1744 if state == BFILE and (
1745 (not context and x[0] == '@')
1745 (not context and x[0] == '@')
1746 or (context is not False and x.startswith('***************'))
1746 or (context is not False and x.startswith('***************'))
1747 or x.startswith('GIT binary patch')):
1747 or x.startswith('GIT binary patch')):
1748 gp = None
1748 gp = None
1749 if (gitpatches and
1749 if (gitpatches and
1750 gitpatches[-1].ispatching(afile, bfile)):
1750 gitpatches[-1].ispatching(afile, bfile)):
1751 gp = gitpatches.pop()
1751 gp = gitpatches.pop()
1752 if x.startswith('GIT binary patch'):
1752 if x.startswith('GIT binary patch'):
1753 h = binhunk(lr, gp.path)
1753 h = binhunk(lr, gp.path)
1754 else:
1754 else:
1755 if context is None and x.startswith('***************'):
1755 if context is None and x.startswith('***************'):
1756 context = True
1756 context = True
1757 h = hunk(x, hunknum + 1, lr, context)
1757 h = hunk(x, hunknum + 1, lr, context)
1758 hunknum += 1
1758 hunknum += 1
1759 if emitfile:
1759 if emitfile:
1760 emitfile = False
1760 emitfile = False
1761 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1761 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1762 yield 'hunk', h
1762 yield 'hunk', h
1763 elif x.startswith('diff --git a/'):
1763 elif x.startswith('diff --git a/'):
1764 m = gitre.match(x.rstrip(' \r\n'))
1764 m = gitre.match(x.rstrip(' \r\n'))
1765 if not m:
1765 if not m:
1766 continue
1766 continue
1767 if gitpatches is None:
1767 if gitpatches is None:
1768 # scan whole input for git metadata
1768 # scan whole input for git metadata
1769 gitpatches = scangitpatch(lr, x)
1769 gitpatches = scangitpatch(lr, x)
1770 yield 'git', [g.copy() for g in gitpatches
1770 yield 'git', [g.copy() for g in gitpatches
1771 if g.op in ('COPY', 'RENAME')]
1771 if g.op in ('COPY', 'RENAME')]
1772 gitpatches.reverse()
1772 gitpatches.reverse()
1773 afile = 'a/' + m.group(1)
1773 afile = 'a/' + m.group(1)
1774 bfile = 'b/' + m.group(2)
1774 bfile = 'b/' + m.group(2)
1775 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1775 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1776 gp = gitpatches.pop()
1776 gp = gitpatches.pop()
1777 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1777 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1778 if not gitpatches:
1778 if not gitpatches:
1779 raise PatchError(_('failed to synchronize metadata for "%s"')
1779 raise PatchError(_('failed to synchronize metadata for "%s"')
1780 % afile[2:])
1780 % afile[2:])
1781 gp = gitpatches[-1]
1781 gp = gitpatches[-1]
1782 newfile = True
1782 newfile = True
1783 elif x.startswith('---'):
1783 elif x.startswith('---'):
1784 # check for a unified diff
1784 # check for a unified diff
1785 l2 = lr.readline()
1785 l2 = lr.readline()
1786 if not l2.startswith('+++'):
1786 if not l2.startswith('+++'):
1787 lr.push(l2)
1787 lr.push(l2)
1788 continue
1788 continue
1789 newfile = True
1789 newfile = True
1790 context = False
1790 context = False
1791 afile = parsefilename(x)
1791 afile = parsefilename(x)
1792 bfile = parsefilename(l2)
1792 bfile = parsefilename(l2)
1793 elif x.startswith('***'):
1793 elif x.startswith('***'):
1794 # check for a context diff
1794 # check for a context diff
1795 l2 = lr.readline()
1795 l2 = lr.readline()
1796 if not l2.startswith('---'):
1796 if not l2.startswith('---'):
1797 lr.push(l2)
1797 lr.push(l2)
1798 continue
1798 continue
1799 l3 = lr.readline()
1799 l3 = lr.readline()
1800 lr.push(l3)
1800 lr.push(l3)
1801 if not l3.startswith("***************"):
1801 if not l3.startswith("***************"):
1802 lr.push(l2)
1802 lr.push(l2)
1803 continue
1803 continue
1804 newfile = True
1804 newfile = True
1805 context = True
1805 context = True
1806 afile = parsefilename(x)
1806 afile = parsefilename(x)
1807 bfile = parsefilename(l2)
1807 bfile = parsefilename(l2)
1808
1808
1809 if newfile:
1809 if newfile:
1810 newfile = False
1810 newfile = False
1811 emitfile = True
1811 emitfile = True
1812 state = BFILE
1812 state = BFILE
1813 hunknum = 0
1813 hunknum = 0
1814
1814
1815 while gitpatches:
1815 while gitpatches:
1816 gp = gitpatches.pop()
1816 gp = gitpatches.pop()
1817 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1817 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1818
1818
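Both _applydiff and changedfiles below drive this generator the same way; a condensed sketch ('patchpath' is an assumed path to a patch file on disk):

    fp = open(patchpath, 'rb')
    try:
        for state, values in iterhunks(fp):
            if state == 'file':
                afile, bfile, firsthunk, gp = values   # select a new target
            elif state == 'hunk':
                pass   # 'values' is the hunk to apply to the current file
            elif state == 'git':
                pass   # 'values' lists COPY/RENAME patchmeta records
    finally:
        fp.close()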
1819 def applybindelta(binchunk, data):
1819 def applybindelta(binchunk, data):
1820 """Apply a binary delta hunk
1820 """Apply a binary delta hunk
1821 The algorithm used is the algorithm from git's patch-delta.c
1821 The algorithm used is the algorithm from git's patch-delta.c
1822 """
1822 """
1823 def deltahead(binchunk):
1823 def deltahead(binchunk):
1824 i = 0
1824 i = 0
1825 for c in binchunk:
1825 for c in binchunk:
1826 i += 1
1826 i += 1
1827 if not (ord(c) & 0x80):
1827 if not (ord(c) & 0x80):
1828 return i
1828 return i
1829 return i
1829 return i
1830 out = ""
1830 out = ""
1831 s = deltahead(binchunk)
1831 s = deltahead(binchunk)
1832 binchunk = binchunk[s:]
1832 binchunk = binchunk[s:]
1833 s = deltahead(binchunk)
1833 s = deltahead(binchunk)
1834 binchunk = binchunk[s:]
1834 binchunk = binchunk[s:]
1835 i = 0
1835 i = 0
1836 while i < len(binchunk):
1836 while i < len(binchunk):
1837 cmd = ord(binchunk[i])
1837 cmd = ord(binchunk[i])
1838 i += 1
1838 i += 1
1839 if (cmd & 0x80):
1839 if (cmd & 0x80):
1840 offset = 0
1840 offset = 0
1841 size = 0
1841 size = 0
1842 if (cmd & 0x01):
1842 if (cmd & 0x01):
1843 offset = ord(binchunk[i])
1843 offset = ord(binchunk[i])
1844 i += 1
1844 i += 1
1845 if (cmd & 0x02):
1845 if (cmd & 0x02):
1846 offset |= ord(binchunk[i]) << 8
1846 offset |= ord(binchunk[i]) << 8
1847 i += 1
1847 i += 1
1848 if (cmd & 0x04):
1848 if (cmd & 0x04):
1849 offset |= ord(binchunk[i]) << 16
1849 offset |= ord(binchunk[i]) << 16
1850 i += 1
1850 i += 1
1851 if (cmd & 0x08):
1851 if (cmd & 0x08):
1852 offset |= ord(binchunk[i]) << 24
1852 offset |= ord(binchunk[i]) << 24
1853 i += 1
1853 i += 1
1854 if (cmd & 0x10):
1854 if (cmd & 0x10):
1855 size = ord(binchunk[i])
1855 size = ord(binchunk[i])
1856 i += 1
1856 i += 1
1857 if (cmd & 0x20):
1857 if (cmd & 0x20):
1858 size |= ord(binchunk[i]) << 8
1858 size |= ord(binchunk[i]) << 8
1859 i += 1
1859 i += 1
1860 if (cmd & 0x40):
1860 if (cmd & 0x40):
1861 size |= ord(binchunk[i]) << 16
1861 size |= ord(binchunk[i]) << 16
1862 i += 1
1862 i += 1
1863 if size == 0:
1863 if size == 0:
1864 size = 0x10000
1864 size = 0x10000
1865 offset_end = offset + size
1865 offset_end = offset + size
1866 out += data[offset:offset_end]
1866 out += data[offset:offset_end]
1867 elif cmd != 0:
1867 elif cmd != 0:
1868 offset_end = i + cmd
1868 offset_end = i + cmd
1869 out += binchunk[i:offset_end]
1869 out += binchunk[i:offset_end]
1870 i += cmd
1870 i += cmd
1871 else:
1871 else:
1872 raise PatchError(_('unexpected delta opcode 0'))
1872 raise PatchError(_('unexpected delta opcode 0'))
1873 return out
1873 return out
1874
1874
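A worked example of the delta format decoded above, built by hand rather than taken from a real git patch: two single-byte sizes (source and result are both 11 bytes, below 0x80), a copy opcode 0x90 (copy, one size byte) copying 6 bytes from offset 0, and a 5-byte literal insert:

    source = 'hello world'
    delta = '\x0b\x0b\x90\x06\x05there'
    assert applybindelta(delta, source) == 'hello there'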
1875 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1875 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1876 """Reads a patch from fp and tries to apply it.
1876 """Reads a patch from fp and tries to apply it.
1877
1877
1878 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1878 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1879 there was any fuzz.
1879 there was any fuzz.
1880
1880
1881 If 'eolmode' is 'strict', the patch content and patched file are
1881 If 'eolmode' is 'strict', the patch content and patched file are
1882 read in binary mode. Otherwise, line endings are ignored when
1882 read in binary mode. Otherwise, line endings are ignored when
1883 patching then normalized according to 'eolmode'.
1883 patching then normalized according to 'eolmode'.
1884 """
1884 """
1885 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1885 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1886 prefix=prefix, eolmode=eolmode)
1886 prefix=prefix, eolmode=eolmode)
1887
1887
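A sketch of how the return value is interpreted (patchbackend below does exactly this; ui, fp, backend and store are assumed to be set up as in that function):

    ret = applydiff(ui, fp, backend, store)
    if ret < 0:
        pass   # rejects were recorded; patchbackend turns this into PatchError
    elif ret > 0:
        pass   # everything applied, but some hunks needed fuzz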
1888 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1888 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1889 eolmode='strict'):
1889 eolmode='strict'):
1890
1890
1891 if prefix:
1891 if prefix:
1892 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1892 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1893 prefix)
1893 prefix)
1894 if prefix != '':
1894 if prefix != '':
1895 prefix += '/'
1895 prefix += '/'
1896 def pstrip(p):
1896 def pstrip(p):
1897 return pathtransform(p, strip - 1, prefix)[1]
1897 return pathtransform(p, strip - 1, prefix)[1]
1898
1898
1899 rejects = 0
1899 rejects = 0
1900 err = 0
1900 err = 0
1901 current_file = None
1901 current_file = None
1902
1902
1903 for state, values in iterhunks(fp):
1903 for state, values in iterhunks(fp):
1904 if state == 'hunk':
1904 if state == 'hunk':
1905 if not current_file:
1905 if not current_file:
1906 continue
1906 continue
1907 ret = current_file.apply(values)
1907 ret = current_file.apply(values)
1908 if ret > 0:
1908 if ret > 0:
1909 err = 1
1909 err = 1
1910 elif state == 'file':
1910 elif state == 'file':
1911 if current_file:
1911 if current_file:
1912 rejects += current_file.close()
1912 rejects += current_file.close()
1913 current_file = None
1913 current_file = None
1914 afile, bfile, first_hunk, gp = values
1914 afile, bfile, first_hunk, gp = values
1915 if gp:
1915 if gp:
1916 gp.path = pstrip(gp.path)
1916 gp.path = pstrip(gp.path)
1917 if gp.oldpath:
1917 if gp.oldpath:
1918 gp.oldpath = pstrip(gp.oldpath)
1918 gp.oldpath = pstrip(gp.oldpath)
1919 else:
1919 else:
1920 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1920 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1921 prefix)
1921 prefix)
1922 if gp.op == 'RENAME':
1922 if gp.op == 'RENAME':
1923 backend.unlink(gp.oldpath)
1923 backend.unlink(gp.oldpath)
1924 if not first_hunk:
1924 if not first_hunk:
1925 if gp.op == 'DELETE':
1925 if gp.op == 'DELETE':
1926 backend.unlink(gp.path)
1926 backend.unlink(gp.path)
1927 continue
1927 continue
1928 data, mode = None, None
1928 data, mode = None, None
1929 if gp.op in ('RENAME', 'COPY'):
1929 if gp.op in ('RENAME', 'COPY'):
1930 data, mode = store.getfile(gp.oldpath)[:2]
1930 data, mode = store.getfile(gp.oldpath)[:2]
1931 # FIXME: failing getfile has never been handled here
1931 # FIXME: failing getfile has never been handled here
1932 assert data is not None
1932 assert data is not None
1933 if gp.mode:
1933 if gp.mode:
1934 mode = gp.mode
1934 mode = gp.mode
1935 if gp.op == 'ADD':
1935 if gp.op == 'ADD':
1936 # Added files without content have no hunk and
1936 # Added files without content have no hunk and
1937 # must be created
1937 # must be created
1938 data = ''
1938 data = ''
1939 if data or mode:
1939 if data or mode:
1940 if (gp.op in ('ADD', 'RENAME', 'COPY')
1940 if (gp.op in ('ADD', 'RENAME', 'COPY')
1941 and backend.exists(gp.path)):
1941 and backend.exists(gp.path)):
1942 raise PatchError(_("cannot create %s: destination "
1942 raise PatchError(_("cannot create %s: destination "
1943 "already exists") % gp.path)
1943 "already exists") % gp.path)
1944 backend.setfile(gp.path, data, mode, gp.oldpath)
1944 backend.setfile(gp.path, data, mode, gp.oldpath)
1945 continue
1945 continue
1946 try:
1946 try:
1947 current_file = patcher(ui, gp, backend, store,
1947 current_file = patcher(ui, gp, backend, store,
1948 eolmode=eolmode)
1948 eolmode=eolmode)
1949 except PatchError as inst:
1949 except PatchError as inst:
1950 ui.warn(str(inst) + '\n')
1950 ui.warn(str(inst) + '\n')
1951 current_file = None
1951 current_file = None
1952 rejects += 1
1952 rejects += 1
1953 continue
1953 continue
1954 elif state == 'git':
1954 elif state == 'git':
1955 for gp in values:
1955 for gp in values:
1956 path = pstrip(gp.oldpath)
1956 path = pstrip(gp.oldpath)
1957 data, mode = backend.getfile(path)
1957 data, mode = backend.getfile(path)
1958 if data is None:
1958 if data is None:
1959 # The error ignored here will trigger a getfile()
1959 # The error ignored here will trigger a getfile()
1960 # error in a place more appropriate for error
1960 # error in a place more appropriate for error
1961 # handling, and will not interrupt the patching
1961 # handling, and will not interrupt the patching
1962 # process.
1962 # process.
1963 pass
1963 pass
1964 else:
1964 else:
1965 store.setfile(path, data, mode)
1965 store.setfile(path, data, mode)
1966 else:
1966 else:
1967 raise error.Abort(_('unsupported parser state: %s') % state)
1967 raise error.Abort(_('unsupported parser state: %s') % state)
1968
1968
1969 if current_file:
1969 if current_file:
1970 rejects += current_file.close()
1970 rejects += current_file.close()
1971
1971
1972 if rejects:
1972 if rejects:
1973 return -1
1973 return -1
1974 return err
1974 return err
1975
1975
1976 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1976 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1977 similarity):
1977 similarity):
1978 """use <patcher> to apply <patchname> to the working directory.
1978 """use <patcher> to apply <patchname> to the working directory.
1979 returns whether patch was applied with fuzz factor."""
1979 returns whether patch was applied with fuzz factor."""
1980
1980
1981 fuzz = False
1981 fuzz = False
1982 args = []
1982 args = []
1983 cwd = repo.root
1983 cwd = repo.root
1984 if cwd:
1984 if cwd:
1985 args.append('-d %s' % util.shellquote(cwd))
1985 args.append('-d %s' % util.shellquote(cwd))
1986 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1986 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1987 util.shellquote(patchname)))
1987 util.shellquote(patchname)))
1988 try:
1988 try:
1989 for line in fp:
1989 for line in fp:
1990 line = line.rstrip()
1990 line = line.rstrip()
1991 ui.note(line + '\n')
1991 ui.note(line + '\n')
1992 if line.startswith('patching file '):
1992 if line.startswith('patching file '):
1993 pf = util.parsepatchoutput(line)
1993 pf = util.parsepatchoutput(line)
1994 printed_file = False
1994 printed_file = False
1995 files.add(pf)
1995 files.add(pf)
1996 elif line.find('with fuzz') >= 0:
1996 elif line.find('with fuzz') >= 0:
1997 fuzz = True
1997 fuzz = True
1998 if not printed_file:
1998 if not printed_file:
1999 ui.warn(pf + '\n')
1999 ui.warn(pf + '\n')
2000 printed_file = True
2000 printed_file = True
2001 ui.warn(line + '\n')
2001 ui.warn(line + '\n')
2002 elif line.find('saving rejects to file') >= 0:
2002 elif line.find('saving rejects to file') >= 0:
2003 ui.warn(line + '\n')
2003 ui.warn(line + '\n')
2004 elif line.find('FAILED') >= 0:
2004 elif line.find('FAILED') >= 0:
2005 if not printed_file:
2005 if not printed_file:
2006 ui.warn(pf + '\n')
2006 ui.warn(pf + '\n')
2007 printed_file = True
2007 printed_file = True
2008 ui.warn(line + '\n')
2008 ui.warn(line + '\n')
2009 finally:
2009 finally:
2010 if files:
2010 if files:
2011 scmutil.marktouched(repo, files, similarity)
2011 scmutil.marktouched(repo, files, similarity)
2012 code = fp.close()
2012 code = fp.close()
2013 if code:
2013 if code:
2014 raise PatchError(_("patch command failed: %s") %
2014 raise PatchError(_("patch command failed: %s") %
2015 util.explainexit(code)[0])
2015 util.explainexit(code)[0])
2016 return fuzz
2016 return fuzz
2017
2017
2018 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2018 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2019 eolmode='strict'):
2019 eolmode='strict'):
2020 if files is None:
2020 if files is None:
2021 files = set()
2021 files = set()
2022 if eolmode is None:
2022 if eolmode is None:
2023 eolmode = ui.config('patch', 'eol', 'strict')
2023 eolmode = ui.config('patch', 'eol', 'strict')
2024 if eolmode.lower() not in eolmodes:
2024 if eolmode.lower() not in eolmodes:
2025 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2025 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2026 eolmode = eolmode.lower()
2026 eolmode = eolmode.lower()
2027
2027
2028 store = filestore()
2028 store = filestore()
2029 try:
2029 try:
2030 fp = open(patchobj, 'rb')
2030 fp = open(patchobj, 'rb')
2031 except TypeError:
2031 except TypeError:
2032 fp = patchobj
2032 fp = patchobj
2033 try:
2033 try:
2034 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2034 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2035 eolmode=eolmode)
2035 eolmode=eolmode)
2036 finally:
2036 finally:
2037 if fp != patchobj:
2037 if fp != patchobj:
2038 fp.close()
2038 fp.close()
2039 files.update(backend.close())
2039 files.update(backend.close())
2040 store.close()
2040 store.close()
2041 if ret < 0:
2041 if ret < 0:
2042 raise PatchError(_('patch failed to apply'))
2042 raise PatchError(_('patch failed to apply'))
2043 return ret > 0
2043 return ret > 0
2044
2044
2045 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2045 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2046 eolmode='strict', similarity=0):
2046 eolmode='strict', similarity=0):
2047 """use builtin patch to apply <patchobj> to the working directory.
2047 """use builtin patch to apply <patchobj> to the working directory.
2048 returns whether patch was applied with fuzz factor."""
2048 returns whether patch was applied with fuzz factor."""
2049 backend = workingbackend(ui, repo, similarity)
2049 backend = workingbackend(ui, repo, similarity)
2050 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2050 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2051
2051
2052 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2052 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2053 eolmode='strict'):
2053 eolmode='strict'):
2054 backend = repobackend(ui, repo, ctx, store)
2054 backend = repobackend(ui, repo, ctx, store)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2055 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2056
2056
2057 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2057 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2058 similarity=0):
2058 similarity=0):
2059 """Apply <patchname> to the working directory.
2059 """Apply <patchname> to the working directory.
2060
2060
2061 'eolmode' specifies how end of lines should be handled. It can be:
2061 'eolmode' specifies how end of lines should be handled. It can be:
2062 - 'strict': inputs are read in binary mode, EOLs are preserved
2062 - 'strict': inputs are read in binary mode, EOLs are preserved
2063 - 'crlf': EOLs are ignored when patching and reset to CRLF
2063 - 'crlf': EOLs are ignored when patching and reset to CRLF
2064 - 'lf': EOLs are ignored when patching and reset to LF
2064 - 'lf': EOLs are ignored when patching and reset to LF
2065 - None: get it from user settings, default to 'strict'
2065 - None: get it from user settings, default to 'strict'
2066 'eolmode' is ignored when using an external patcher program.
2066 'eolmode' is ignored when using an external patcher program.
2067
2067
2068 Returns whether patch was applied with fuzz factor.
2068 Returns whether patch was applied with fuzz factor.
2069 """
2069 """
2070 patcher = ui.config('ui', 'patch')
2070 patcher = ui.config('ui', 'patch')
2071 if files is None:
2071 if files is None:
2072 files = set()
2072 files = set()
2073 if patcher:
2073 if patcher:
2074 return _externalpatch(ui, repo, patcher, patchname, strip,
2074 return _externalpatch(ui, repo, patcher, patchname, strip,
2075 files, similarity)
2075 files, similarity)
2076 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2076 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2077 similarity)
2077 similarity)
2078
2078
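A minimal sketch of the entry point above; the path is made up and ui/repo are assumed to be live objects:

    touched = set()
    fuzz = patch(ui, repo, '/tmp/fix.diff', strip=1, files=touched,
                 eolmode=None)   # None: honor the user's patch.eol setting
    # 'touched' now holds every path the patch modified; 'fuzz' is True if
    # any hunk applied only with fuzz.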
2079 def changedfiles(ui, repo, patchpath, strip=1):
2079 def changedfiles(ui, repo, patchpath, strip=1):
2080 backend = fsbackend(ui, repo.root)
2080 backend = fsbackend(ui, repo.root)
2081 fp = open(patchpath, 'rb')
2081 fp = open(patchpath, 'rb')
2082 try:
2082 try:
2083 changed = set()
2083 changed = set()
2084 for state, values in iterhunks(fp):
2084 for state, values in iterhunks(fp):
2085 if state == 'file':
2085 if state == 'file':
2086 afile, bfile, first_hunk, gp = values
2086 afile, bfile, first_hunk, gp = values
2087 if gp:
2087 if gp:
2088 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2088 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2089 if gp.oldpath:
2089 if gp.oldpath:
2090 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2090 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2091 else:
2091 else:
2092 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2092 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2093 '')
2093 '')
2094 changed.add(gp.path)
2094 changed.add(gp.path)
2095 if gp.op == 'RENAME':
2095 if gp.op == 'RENAME':
2096 changed.add(gp.oldpath)
2096 changed.add(gp.oldpath)
2097 elif state not in ('hunk', 'git'):
2097 elif state not in ('hunk', 'git'):
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2098 raise error.Abort(_('unsupported parser state: %s') % state)
2099 return changed
2099 return changed
2100 finally:
2100 finally:
2101 fp.close()
2101 fp.close()
2102
2102
2103 class GitDiffRequired(Exception):
2103 class GitDiffRequired(Exception):
2104 pass
2104 pass
2105
2105
2106 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2106 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2107 '''return diffopts with all features supported and parsed'''
2107 '''return diffopts with all features supported and parsed'''
2108 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2108 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2109 git=True, whitespace=True, formatchanging=True)
2109 git=True, whitespace=True, formatchanging=True)
2110
2110
2111 diffopts = diffallopts
2111 diffopts = diffallopts
2112
2112
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            if v:
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, None, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')
    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        buildopts['nobinary'] = get('nobinary')
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**buildopts)

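# Illustrative usage sketch (not part of the original module): build diff
# options that honor both the [diff] configuration section and explicit
# command-style overrides. The opts dict below is hypothetical.
def _example_build_diffopts(ui):
    # all features parsed: git-style diffs, whitespace and format-changing
    # options
    full = diffallopts(ui, opts={'git': True, 'unified': '5'})
    # only whitespace-related features opted in; git and format-changing
    # settings in the config are ignored
    narrow = difffeatureopts(ui, whitespace=True)
    return full, narrow
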
def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
         losedatafn=None, prefix='', relroot=''):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwargs) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of the current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.'''

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    copy = {}
    if opts.git or opts.upgrade:
        copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)

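# Illustrative usage sketch (not part of the original module): print a
# git-style diff between the working directory parent and the working
# directory. The repository path is hypothetical.
def _example_print_diff():
    from mercurial import ui as uimod, hg
    u = uimod.ui()
    repo = hg.repository(u, '/path/to/repo')
    opts = diffallopts(u, opts={'git': True})
    for chunk in diff(repo, opts=opts):
        u.write(chunk)
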
def difflabel(func, *args, **kw):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kw):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = textprefixes
            if head:
                prefixes = headprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        for token in tabsplitter.findall(stripline):
                            if '\t' == token[0]:
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')

def diffui(*args, **kw):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kw)

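# Illustrative usage sketch (not part of the original module): emit a diff
# through ui.write() with labels such as 'diff.inserted' attached, so the
# output can be colorized by label-aware ui implementations.
def _example_labeled_diff(ui, repo):
    opts = diffallopts(ui, opts={'git': True})
    for chunk, label in diffui(repo, opts=opts):
        ui.write(chunk, label=label)
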
def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)
    # Fix up added, since merged-in additions appear as
    # modifications during merges
    for f in modified:
        if f not in ctx1:
            addedset.add(f)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

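# Illustrative sketch (not part of the original module): _filepairs() is an
# internal helper, but its (f1, f2, copyop) contract can be exercised with
# plain containers. The frozenset stands in for ctx1 only to answer the
# "f not in ctx1" membership test; all names below are hypothetical.
def _example_filepairs():
    opts = mdiff.diffopts(git=True)
    ctx1 = frozenset(['kept.txt', 'old.txt'])  # files present before
    copy = {'new.txt': 'old.txt'}              # new.txt renamed from old.txt
    pairs = list(_filepairs(ctx1, ['kept.txt'], ['new.txt'], ['old.txt'],
                            copy, opts))
    # -> [('kept.txt', 'kept.txt', None), ('old.txt', 'new.txt', 'rename')]
    # the rename entry suppresses a separate removal entry for old.txt
    return pairs
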
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

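# Illustrative sketch (not part of the original module): compute per-file
# statistics from unified diff text. The sample diff below is hypothetical.
def _example_diffstatdata():
    sample = ('diff -r abc123 -r def456 foo.py\n'
              '--- a/foo.py\n'
              '+++ b/foo.py\n'
              '@@ -1,2 +1,2 @@\n'
              '-removed line\n'
              '+added line\n')
    stats = diffstatdata(sample.splitlines())
    # -> [('foo.py', 1, 1, False)]
    maxname, maxtotal, adds, removes, binary = diffstatsum(stats)
    return stats, adds, removes
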
def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

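# Illustrative usage sketch (not part of the original module): render a
# diffstat histogram for the current working directory changes of a
# hypothetical repository object.
def _example_diffstat(ui, repo):
    opts = diffallopts(ui, opts={'git': True})
    lines = ''.join(diff(repo, opts=opts)).splitlines()
    ui.write(diffstat(lines, width=72, git=True))
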
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
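
# Illustrative usage sketch (not part of the original module): same histogram
# as diffstat(), but routed through ui.write() with labels so the '+'/'-'
# columns can be colorized.
def _example_diffstatui(ui, lines):
    for output, label in diffstatui(lines, width=72):
        ui.write(output, label=label)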