record: add an operation argument to customize recording ui...
Laurent Charignon
r25310:c1f5ef76 default
@@ -1,3345 +1,3351 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import crecord as crecordmod
17 import crecord as crecordmod
18 import lock as lockmod
18 import lock as lockmod
19
19
20 def ishunk(x):
20 def ishunk(x):
21 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
21 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
22 return isinstance(x, hunkclasses)
22 return isinstance(x, hunkclasses)
23
23
24 def newandmodified(chunks, originalchunks):
24 def newandmodified(chunks, originalchunks):
25 newlyaddedandmodifiedfiles = set()
25 newlyaddedandmodifiedfiles = set()
26 for chunk in chunks:
26 for chunk in chunks:
27 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
27 if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
28 originalchunks:
28 originalchunks:
29 newlyaddedandmodifiedfiles.add(chunk.header.filename())
29 newlyaddedandmodifiedfiles.add(chunk.header.filename())
30 return newlyaddedandmodifiedfiles
30 return newlyaddedandmodifiedfiles
31
31
32 def parsealiases(cmd):
32 def parsealiases(cmd):
33 return cmd.lstrip("^").split("|")
33 return cmd.lstrip("^").split("|")
34
34
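For context on the command-table convention used by parsealiases above: a key such as "^log|history" marks 'log' as a frequently used command and 'history' as its alias; the leading '^' is stripped before splitting. A minimal illustration (the key value is hypothetical, not taken from this changeset):

# same logic as parsealiases("^log|history"); the '^' only flags the command
# for the short help listing and is never part of an alias
key = "^log|history"
aliases = key.lstrip("^").split("|")
assert aliases == ["log", "history"]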
35 def setupwrapcolorwrite(ui):
35 def setupwrapcolorwrite(ui):
36 # wrap ui.write so diff output can be labeled/colorized
36 # wrap ui.write so diff output can be labeled/colorized
37 def wrapwrite(orig, *args, **kw):
37 def wrapwrite(orig, *args, **kw):
38 label = kw.pop('label', '')
38 label = kw.pop('label', '')
39 for chunk, l in patch.difflabel(lambda: args):
39 for chunk, l in patch.difflabel(lambda: args):
40 orig(chunk, label=label + l)
40 orig(chunk, label=label + l)
41
41
42 oldwrite = ui.write
42 oldwrite = ui.write
43 def wrap(*args, **kwargs):
43 def wrap(*args, **kwargs):
44 return wrapwrite(oldwrite, *args, **kwargs)
44 return wrapwrite(oldwrite, *args, **kwargs)
45 setattr(ui, 'write', wrap)
45 setattr(ui, 'write', wrap)
46 return oldwrite
46 return oldwrite
47
47
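setupwrapcolorwrite returns the original ui.write so the caller can undo the wrapping; recordfilter below uses exactly this install/restore pattern. A minimal sketch of the intended usage (the diff text written here is a placeholder):

# install the label-aware wrapper, emit some diff output, then restore
oldwrite = setupwrapcolorwrite(ui)
try:
    ui.write('diff --git a/f b/f\n')   # now routed through patch.difflabel
finally:
    ui.write = oldwrite                # undo the monkey-patching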
48 def filterchunks(ui, originalhunks, usecurses, testfile):
48 def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
49 if usecurses:
49 if usecurses:
50 if testfile:
50 if testfile:
51 recordfn = crecordmod.testdecorator(testfile,
51 recordfn = crecordmod.testdecorator(testfile,
52 crecordmod.testchunkselector)
52 crecordmod.testchunkselector)
53 else:
53 else:
54 recordfn = crecordmod.chunkselector
54 recordfn = crecordmod.chunkselector
55
55
56 return crecordmod.filterpatch(ui, originalhunks, recordfn)
56 return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)
57
57
58 else:
58 else:
59 return patch.filterpatch(ui, originalhunks)
59 return patch.filterpatch(ui, originalhunks, operation)
60
60
61 def recordfilter(ui, originalhunks):
61 def recordfilter(ui, originalhunks, operation=None):
62 """ Prompts the user to filter the originalhunks and return a list of
63 selected hunks.
64 *operation* is used for ui purposes to indicate to the user
65 what kind of filtering they are doing: reverting, committing, shelving, etc.
66 """
62 usecurses = ui.configbool('experimental', 'crecord', False)
67 usecurses = ui.configbool('experimental', 'crecord', False)
63 testfile = ui.config('experimental', 'crecordtest', None)
68 testfile = ui.config('experimental', 'crecordtest', None)
64 oldwrite = setupwrapcolorwrite(ui)
69 oldwrite = setupwrapcolorwrite(ui)
65 try:
70 try:
66 newchunks = filterchunks(ui, originalhunks, usecurses, testfile)
71 newchunks = filterchunks(ui, originalhunks, usecurses, testfile,
72 operation)
67 finally:
73 finally:
68 ui.write = oldwrite
74 ui.write = oldwrite
69 return newchunks
75 return newchunks
70
76
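The point of the new optional operation argument is to let callers other than commit/record (revert, shelve, ...) tell the chunk-selection UI which verb to display. A hedged sketch of a caller, assuming a hypothetical 'revert' label; the wrapper matches the filterfn signature that dorecord below expects:

# hypothetical caller-side wrapper: same (ui, originalhunks) signature that
# dorecord's filterfn uses, but with the interactive prompts labeled 'revert'
def revertfilter(ui, originalhunks):
    return recordfilter(ui, originalhunks, operation='revert')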
71 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
77 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
72 filterfn, *pats, **opts):
78 filterfn, *pats, **opts):
73 import merge as mergemod
79 import merge as mergemod
74
80
75 if not ui.interactive():
81 if not ui.interactive():
76 raise util.Abort(_('running non-interactively, use %s instead') %
82 raise util.Abort(_('running non-interactively, use %s instead') %
77 cmdsuggest)
83 cmdsuggest)
78
84
79 # make sure username is set before going interactive
85 # make sure username is set before going interactive
80 if not opts.get('user'):
86 if not opts.get('user'):
81 ui.username() # raise exception, username not provided
87 ui.username() # raise exception, username not provided
82
88
83 def recordfunc(ui, repo, message, match, opts):
89 def recordfunc(ui, repo, message, match, opts):
84 """This is generic record driver.
90 """This is generic record driver.
85
91
86 Its job is to interactively filter local changes, and
92 Its job is to interactively filter local changes, and
87 accordingly prepare working directory into a state in which the
93 accordingly prepare working directory into a state in which the
88 job can be delegated to a non-interactive commit command such as
94 job can be delegated to a non-interactive commit command such as
89 'commit' or 'qrefresh'.
95 'commit' or 'qrefresh'.
90
96
91 After the actual job is done by non-interactive command, the
97 After the actual job is done by non-interactive command, the
92 working directory is restored to its original state.
98 working directory is restored to its original state.
93
99
94 In the end we'll record interesting changes, and everything else
100 In the end we'll record interesting changes, and everything else
95 will be left in place, so the user can continue working.
101 will be left in place, so the user can continue working.
96 """
102 """
97
103
98 checkunfinished(repo, commit=True)
104 checkunfinished(repo, commit=True)
99 merge = len(repo[None].parents()) > 1
105 merge = len(repo[None].parents()) > 1
100 if merge:
106 if merge:
101 raise util.Abort(_('cannot partially commit a merge '
107 raise util.Abort(_('cannot partially commit a merge '
102 '(use "hg commit" instead)'))
108 '(use "hg commit" instead)'))
103
109
104 status = repo.status(match=match)
110 status = repo.status(match=match)
105 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
111 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
106 diffopts.nodates = True
112 diffopts.nodates = True
107 diffopts.git = True
113 diffopts.git = True
108 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
114 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
109 originalchunks = patch.parsepatch(originaldiff)
115 originalchunks = patch.parsepatch(originaldiff)
110
116
111 # 1. filter patch, so we have an intending-to-apply subset of it
117 # 1. filter patch, so we have an intending-to-apply subset of it
112 try:
118 try:
113 chunks = filterfn(ui, originalchunks)
119 chunks = filterfn(ui, originalchunks)
114 except patch.PatchError, err:
120 except patch.PatchError, err:
115 raise util.Abort(_('error parsing patch: %s') % err)
121 raise util.Abort(_('error parsing patch: %s') % err)
116
122
117 # We need to keep a backup of files that have been newly added and
123 # We need to keep a backup of files that have been newly added and
118 # modified during the recording process because there is a previous
124 # modified during the recording process because there is a previous
119 # version without the edit in the workdir
125 # version without the edit in the workdir
120 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
126 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
121 contenders = set()
127 contenders = set()
122 for h in chunks:
128 for h in chunks:
123 try:
129 try:
124 contenders.update(set(h.files()))
130 contenders.update(set(h.files()))
125 except AttributeError:
131 except AttributeError:
126 pass
132 pass
127
133
128 changed = status.modified + status.added + status.removed
134 changed = status.modified + status.added + status.removed
129 newfiles = [f for f in changed if f in contenders]
135 newfiles = [f for f in changed if f in contenders]
130 if not newfiles:
136 if not newfiles:
131 ui.status(_('no changes to record\n'))
137 ui.status(_('no changes to record\n'))
132 return 0
138 return 0
133
139
134 modified = set(status.modified)
140 modified = set(status.modified)
135
141
136 # 2. backup changed files, so we can restore them in the end
142 # 2. backup changed files, so we can restore them in the end
137
143
138 if backupall:
144 if backupall:
139 tobackup = changed
145 tobackup = changed
140 else:
146 else:
141 tobackup = [f for f in newfiles if f in modified or f in \
147 tobackup = [f for f in newfiles if f in modified or f in \
142 newlyaddedandmodifiedfiles]
148 newlyaddedandmodifiedfiles]
143 backups = {}
149 backups = {}
144 if tobackup:
150 if tobackup:
145 backupdir = repo.join('record-backups')
151 backupdir = repo.join('record-backups')
146 try:
152 try:
147 os.mkdir(backupdir)
153 os.mkdir(backupdir)
148 except OSError, err:
154 except OSError, err:
149 if err.errno != errno.EEXIST:
155 if err.errno != errno.EEXIST:
150 raise
156 raise
151 try:
157 try:
152 # backup continues
158 # backup continues
153 for f in tobackup:
159 for f in tobackup:
154 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
160 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
155 dir=backupdir)
161 dir=backupdir)
156 os.close(fd)
162 os.close(fd)
157 ui.debug('backup %r as %r\n' % (f, tmpname))
163 ui.debug('backup %r as %r\n' % (f, tmpname))
158 util.copyfile(repo.wjoin(f), tmpname)
164 util.copyfile(repo.wjoin(f), tmpname)
159 shutil.copystat(repo.wjoin(f), tmpname)
165 shutil.copystat(repo.wjoin(f), tmpname)
160 backups[f] = tmpname
166 backups[f] = tmpname
161
167
162 fp = cStringIO.StringIO()
168 fp = cStringIO.StringIO()
163 for c in chunks:
169 for c in chunks:
164 fname = c.filename()
170 fname = c.filename()
165 if fname in backups:
171 if fname in backups:
166 c.write(fp)
172 c.write(fp)
167 dopatch = fp.tell()
173 dopatch = fp.tell()
168 fp.seek(0)
174 fp.seek(0)
169
175
170 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
176 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
171 # 3a. apply filtered patch to clean repo (clean)
177 # 3a. apply filtered patch to clean repo (clean)
172 if backups:
178 if backups:
173 # Equivalent to hg.revert
179 # Equivalent to hg.revert
174 choices = lambda key: key in backups
180 choices = lambda key: key in backups
175 mergemod.update(repo, repo.dirstate.p1(),
181 mergemod.update(repo, repo.dirstate.p1(),
176 False, True, choices)
182 False, True, choices)
177
183
178 # 3b. (apply)
184 # 3b. (apply)
179 if dopatch:
185 if dopatch:
180 try:
186 try:
181 ui.debug('applying patch\n')
187 ui.debug('applying patch\n')
182 ui.debug(fp.getvalue())
188 ui.debug(fp.getvalue())
183 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
189 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
184 except patch.PatchError, err:
190 except patch.PatchError, err:
185 raise util.Abort(str(err))
191 raise util.Abort(str(err))
186 del fp
192 del fp
187
193
188 # 4. We prepared working directory according to filtered
194 # 4. We prepared working directory according to filtered
189 # patch. Now is the time to delegate the job to
195 # patch. Now is the time to delegate the job to
190 # commit/qrefresh or the like!
196 # commit/qrefresh or the like!
191
197
192 # Make all of the pathnames absolute.
198 # Make all of the pathnames absolute.
193 newfiles = [repo.wjoin(nf) for nf in newfiles]
199 newfiles = [repo.wjoin(nf) for nf in newfiles]
194 return commitfunc(ui, repo, *newfiles, **opts)
200 return commitfunc(ui, repo, *newfiles, **opts)
195 finally:
201 finally:
196 # 5. finally restore backed-up files
202 # 5. finally restore backed-up files
197 try:
203 try:
198 for realname, tmpname in backups.iteritems():
204 for realname, tmpname in backups.iteritems():
199 ui.debug('restoring %r to %r\n' % (tmpname, realname))
205 ui.debug('restoring %r to %r\n' % (tmpname, realname))
200 util.copyfile(tmpname, repo.wjoin(realname))
206 util.copyfile(tmpname, repo.wjoin(realname))
201 # Our calls to copystat() here and above are a
207 # Our calls to copystat() here and above are a
202 # hack to trick any editors that have f open into
208 # hack to trick any editors that have f open into
203 # thinking we haven't modified them.
209 # thinking we haven't modified them.
204 #
210 #
205 # Also note that this is racy, as an editor could
211 # Also note that this is racy, as an editor could
206 # notice the file's mtime before we've finished
212 # notice the file's mtime before we've finished
207 # writing it.
213 # writing it.
208 shutil.copystat(tmpname, repo.wjoin(realname))
214 shutil.copystat(tmpname, repo.wjoin(realname))
209 os.unlink(tmpname)
215 os.unlink(tmpname)
210 if tobackup:
216 if tobackup:
211 os.rmdir(backupdir)
217 os.rmdir(backupdir)
212 except OSError:
218 except OSError:
213 pass
219 pass
214
220
215 return commit(ui, repo, recordfunc, pats, opts)
221 return commit(ui, repo, recordfunc, pats, opts)
216
222
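dorecord ties the pieces together: filterfn selects the hunks, commitfunc finishes the job, and cmdsuggest is only used in the error message for non-interactive runs. A hedged sketch of how a command might drive it; commands.commit, the option values, and the interactivecommit name are illustrative assumptions, not part of this changeset:

# hedged sketch: how an interactive commit might delegate to dorecord
# (commands.commit is used here purely as an illustration)
from mercurial import commands

def interactivecommit(ui, repo, *pats, **opts):
    # backupall=False: only back up the files the filtered hunks touch
    return dorecord(ui, repo, commands.commit, 'hg commit', False,
                    recordfilter, *pats, **opts)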
217 def findpossible(cmd, table, strict=False):
223 def findpossible(cmd, table, strict=False):
218 """
224 """
219 Return cmd -> (aliases, command table entry)
225 Return cmd -> (aliases, command table entry)
220 for each matching command.
226 for each matching command.
221 Return debug commands (or their aliases) only if no normal command matches.
227 Return debug commands (or their aliases) only if no normal command matches.
222 """
228 """
223 choice = {}
229 choice = {}
224 debugchoice = {}
230 debugchoice = {}
225
231
226 if cmd in table:
232 if cmd in table:
227 # short-circuit exact matches, "log" alias beats "^log|history"
233 # short-circuit exact matches, "log" alias beats "^log|history"
228 keys = [cmd]
234 keys = [cmd]
229 else:
235 else:
230 keys = table.keys()
236 keys = table.keys()
231
237
232 allcmds = []
238 allcmds = []
233 for e in keys:
239 for e in keys:
234 aliases = parsealiases(e)
240 aliases = parsealiases(e)
235 allcmds.extend(aliases)
241 allcmds.extend(aliases)
236 found = None
242 found = None
237 if cmd in aliases:
243 if cmd in aliases:
238 found = cmd
244 found = cmd
239 elif not strict:
245 elif not strict:
240 for a in aliases:
246 for a in aliases:
241 if a.startswith(cmd):
247 if a.startswith(cmd):
242 found = a
248 found = a
243 break
249 break
244 if found is not None:
250 if found is not None:
245 if aliases[0].startswith("debug") or found.startswith("debug"):
251 if aliases[0].startswith("debug") or found.startswith("debug"):
246 debugchoice[found] = (aliases, table[e])
252 debugchoice[found] = (aliases, table[e])
247 else:
253 else:
248 choice[found] = (aliases, table[e])
254 choice[found] = (aliases, table[e])
249
255
250 if not choice and debugchoice:
256 if not choice and debugchoice:
251 choice = debugchoice
257 choice = debugchoice
252
258
253 return choice, allcmds
259 return choice, allcmds
254
260
255 def findcmd(cmd, table, strict=True):
261 def findcmd(cmd, table, strict=True):
256 """Return (aliases, command table entry) for command string."""
262 """Return (aliases, command table entry) for command string."""
257 choice, allcmds = findpossible(cmd, table, strict)
263 choice, allcmds = findpossible(cmd, table, strict)
258
264
259 if cmd in choice:
265 if cmd in choice:
260 return choice[cmd]
266 return choice[cmd]
261
267
262 if len(choice) > 1:
268 if len(choice) > 1:
263 clist = choice.keys()
269 clist = choice.keys()
264 clist.sort()
270 clist.sort()
265 raise error.AmbiguousCommand(cmd, clist)
271 raise error.AmbiguousCommand(cmd, clist)
266
272
267 if choice:
273 if choice:
268 return choice.values()[0]
274 return choice.values()[0]
269
275
270 raise error.UnknownCommand(cmd, allcmds)
276 raise error.UnknownCommand(cmd, allcmds)
271
277
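findpossible/findcmd implement Mercurial's command-name resolution, including prefix abbreviation when strict is off. An illustration with a hypothetical command table:

# with a table containing '^log|history' and 'debugdata' (hypothetical):
#   findcmd('history', table)               -> (['log', 'history'], entry)
#   findcmd('hist', table, strict=False)    -> same entry, via prefix match
#   findcmd('debugd', table, strict=False)  -> the debug command, offered only
#                                              because no normal command matches
#   findcmd('xyzzy', table)                 -> raises error.UnknownCommand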
272 def findrepo(p):
278 def findrepo(p):
273 while not os.path.isdir(os.path.join(p, ".hg")):
279 while not os.path.isdir(os.path.join(p, ".hg")):
274 oldp, p = p, os.path.dirname(p)
280 oldp, p = p, os.path.dirname(p)
275 if p == oldp:
281 if p == oldp:
276 return None
282 return None
277
283
278 return p
284 return p
279
285
280 def bailifchanged(repo, merge=True):
286 def bailifchanged(repo, merge=True):
281 if merge and repo.dirstate.p2() != nullid:
287 if merge and repo.dirstate.p2() != nullid:
282 raise util.Abort(_('outstanding uncommitted merge'))
288 raise util.Abort(_('outstanding uncommitted merge'))
283 modified, added, removed, deleted = repo.status()[:4]
289 modified, added, removed, deleted = repo.status()[:4]
284 if modified or added or removed or deleted:
290 if modified or added or removed or deleted:
285 raise util.Abort(_('uncommitted changes'))
291 raise util.Abort(_('uncommitted changes'))
286 ctx = repo[None]
292 ctx = repo[None]
287 for s in sorted(ctx.substate):
293 for s in sorted(ctx.substate):
288 ctx.sub(s).bailifchanged()
294 ctx.sub(s).bailifchanged()
289
295
290 def logmessage(ui, opts):
296 def logmessage(ui, opts):
291 """ get the log message according to -m and -l option """
297 """ get the log message according to -m and -l option """
292 message = opts.get('message')
298 message = opts.get('message')
293 logfile = opts.get('logfile')
299 logfile = opts.get('logfile')
294
300
295 if message and logfile:
301 if message and logfile:
296 raise util.Abort(_('options --message and --logfile are mutually '
302 raise util.Abort(_('options --message and --logfile are mutually '
297 'exclusive'))
303 'exclusive'))
298 if not message and logfile:
304 if not message and logfile:
299 try:
305 try:
300 if logfile == '-':
306 if logfile == '-':
301 message = ui.fin.read()
307 message = ui.fin.read()
302 else:
308 else:
303 message = '\n'.join(util.readfile(logfile).splitlines())
309 message = '\n'.join(util.readfile(logfile).splitlines())
304 except IOError, inst:
310 except IOError, inst:
305 raise util.Abort(_("can't read commit message '%s': %s") %
311 raise util.Abort(_("can't read commit message '%s': %s") %
306 (logfile, inst.strerror))
312 (logfile, inst.strerror))
307 return message
313 return message
308
314
309 def mergeeditform(ctxorbool, baseformname):
315 def mergeeditform(ctxorbool, baseformname):
310 """return appropriate editform name (referencing a committemplate)
316 """return appropriate editform name (referencing a committemplate)
311
317
312 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
318 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
313 merging is committed.
319 merging is committed.
314
320
315 This returns baseformname with '.merge' appended if it is a merge,
321 This returns baseformname with '.merge' appended if it is a merge,
316 otherwise '.normal' is appended.
322 otherwise '.normal' is appended.
317 """
323 """
318 if isinstance(ctxorbool, bool):
324 if isinstance(ctxorbool, bool):
319 if ctxorbool:
325 if ctxorbool:
320 return baseformname + ".merge"
326 return baseformname + ".merge"
321 elif 1 < len(ctxorbool.parents()):
327 elif 1 < len(ctxorbool.parents()):
322 return baseformname + ".merge"
328 return baseformname + ".merge"
323
329
324 return baseformname + ".normal"
330 return baseformname + ".normal"
325
331
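mergeeditform only appends a suffix, so a committemplate section can distinguish merge commits from normal ones. A few illustrative results derived from the code above:

# illustrative values:
#   mergeeditform(True, 'commit')        -> 'commit.merge'
#   mergeeditform(False, 'commit')       -> 'commit.normal'
#   mergeeditform(repo[None], 'import')  -> 'import.merge' while a merge is
#                                           in progress, else 'import.normal'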
326 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
332 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
327 editform='', **opts):
333 editform='', **opts):
328 """get appropriate commit message editor according to '--edit' option
334 """get appropriate commit message editor according to '--edit' option
329
335
330 'finishdesc' is a function to be called with edited commit message
336 'finishdesc' is a function to be called with edited commit message
331 (= 'description' of the new changeset) just after editing, but
337 (= 'description' of the new changeset) just after editing, but
332 before checking empty-ness. It should return actual text to be
338 before checking empty-ness. It should return actual text to be
333 stored into history. This allows changing the description
339 stored into history. This allows changing the description
334 before storing.
340 before storing.
335
341
336 'extramsg' is an extra message to be shown in the editor instead of
342 'extramsg' is an extra message to be shown in the editor instead of
337 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
343 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
338 are automatically added.
344 are automatically added.
339
345
340 'editform' is a dot-separated list of names, to distinguish
346 'editform' is a dot-separated list of names, to distinguish
341 the purpose of commit text editing.
347 the purpose of commit text editing.
342
348
343 'getcommiteditor' returns 'commitforceeditor' regardless of
349 'getcommiteditor' returns 'commitforceeditor' regardless of
344 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
350 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
345 they are specific for usage in MQ.
351 they are specific for usage in MQ.
346 """
352 """
347 if edit or finishdesc or extramsg:
353 if edit or finishdesc or extramsg:
348 return lambda r, c, s: commitforceeditor(r, c, s,
354 return lambda r, c, s: commitforceeditor(r, c, s,
349 finishdesc=finishdesc,
355 finishdesc=finishdesc,
350 extramsg=extramsg,
356 extramsg=extramsg,
351 editform=editform)
357 editform=editform)
352 elif editform:
358 elif editform:
353 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
359 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
354 else:
360 else:
355 return commiteditor
361 return commiteditor
356
362
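The selection logic above boils down to: any of edit/finishdesc/extramsg forces the interactive editor, an editform alone only tags the edit, and otherwise the plain commiteditor is used. Illustrative calls:

# illustrative outcomes of getcommiteditor:
#   getcommiteditor()                                   -> commiteditor
#   getcommiteditor(editform='commit.normal')           -> commiteditor bound
#                                                          to that editform
#   getcommiteditor(edit=True, editform='commit.amend')
#   getcommiteditor(extramsg=_('leave empty to abort'))
#                                                       -> commitforceeditor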
357 def loglimit(opts):
363 def loglimit(opts):
358 """get the log limit according to option -l/--limit"""
364 """get the log limit according to option -l/--limit"""
359 limit = opts.get('limit')
365 limit = opts.get('limit')
360 if limit:
366 if limit:
361 try:
367 try:
362 limit = int(limit)
368 limit = int(limit)
363 except ValueError:
369 except ValueError:
364 raise util.Abort(_('limit must be a positive integer'))
370 raise util.Abort(_('limit must be a positive integer'))
365 if limit <= 0:
371 if limit <= 0:
366 raise util.Abort(_('limit must be positive'))
372 raise util.Abort(_('limit must be positive'))
367 else:
373 else:
368 limit = None
374 limit = None
369 return limit
375 return limit
370
376
371 def makefilename(repo, pat, node, desc=None,
377 def makefilename(repo, pat, node, desc=None,
372 total=None, seqno=None, revwidth=None, pathname=None):
378 total=None, seqno=None, revwidth=None, pathname=None):
373 node_expander = {
379 node_expander = {
374 'H': lambda: hex(node),
380 'H': lambda: hex(node),
375 'R': lambda: str(repo.changelog.rev(node)),
381 'R': lambda: str(repo.changelog.rev(node)),
376 'h': lambda: short(node),
382 'h': lambda: short(node),
377 'm': lambda: re.sub('[^\w]', '_', str(desc))
383 'm': lambda: re.sub('[^\w]', '_', str(desc))
378 }
384 }
379 expander = {
385 expander = {
380 '%': lambda: '%',
386 '%': lambda: '%',
381 'b': lambda: os.path.basename(repo.root),
387 'b': lambda: os.path.basename(repo.root),
382 }
388 }
383
389
384 try:
390 try:
385 if node:
391 if node:
386 expander.update(node_expander)
392 expander.update(node_expander)
387 if node:
393 if node:
388 expander['r'] = (lambda:
394 expander['r'] = (lambda:
389 str(repo.changelog.rev(node)).zfill(revwidth or 0))
395 str(repo.changelog.rev(node)).zfill(revwidth or 0))
390 if total is not None:
396 if total is not None:
391 expander['N'] = lambda: str(total)
397 expander['N'] = lambda: str(total)
392 if seqno is not None:
398 if seqno is not None:
393 expander['n'] = lambda: str(seqno)
399 expander['n'] = lambda: str(seqno)
394 if total is not None and seqno is not None:
400 if total is not None and seqno is not None:
395 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
401 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
396 if pathname is not None:
402 if pathname is not None:
397 expander['s'] = lambda: os.path.basename(pathname)
403 expander['s'] = lambda: os.path.basename(pathname)
398 expander['d'] = lambda: os.path.dirname(pathname) or '.'
404 expander['d'] = lambda: os.path.dirname(pathname) or '.'
399 expander['p'] = lambda: pathname
405 expander['p'] = lambda: pathname
400
406
401 newname = []
407 newname = []
402 patlen = len(pat)
408 patlen = len(pat)
403 i = 0
409 i = 0
404 while i < patlen:
410 while i < patlen:
405 c = pat[i]
411 c = pat[i]
406 if c == '%':
412 if c == '%':
407 i += 1
413 i += 1
408 c = pat[i]
414 c = pat[i]
409 c = expander[c]()
415 c = expander[c]()
410 newname.append(c)
416 newname.append(c)
411 i += 1
417 i += 1
412 return ''.join(newname)
418 return ''.join(newname)
413 except KeyError, inst:
419 except KeyError, inst:
414 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
420 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
415 inst.args[0])
421 inst.args[0])
416
422
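makefilename drives output patterns like the default 'hg-%h.patch' used by export further down. An illustrative expansion (the revision number and hash are made up):

# illustrative: for a node at revision 42 with short hash 1234567890ab,
# exporting patch 2 of 3:
#   makefilename(repo, 'full-%R-of-%N.patch', node, total=3, seqno=2)
#       -> 'full-42-of-3.patch'
#   makefilename(repo, '%h-%n.patch', node, total=3, seqno=2)
#       -> '1234567890ab-2.patch'
#   '%%' yields a literal '%'; an unknown specifier aborts with
#   "invalid format spec ... in output filename"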
417 def makefileobj(repo, pat, node=None, desc=None, total=None,
423 def makefileobj(repo, pat, node=None, desc=None, total=None,
418 seqno=None, revwidth=None, mode='wb', modemap=None,
424 seqno=None, revwidth=None, mode='wb', modemap=None,
419 pathname=None):
425 pathname=None):
420
426
421 writable = mode not in ('r', 'rb')
427 writable = mode not in ('r', 'rb')
422
428
423 if not pat or pat == '-':
429 if not pat or pat == '-':
424 if writable:
430 if writable:
425 fp = repo.ui.fout
431 fp = repo.ui.fout
426 else:
432 else:
427 fp = repo.ui.fin
433 fp = repo.ui.fin
428 if util.safehasattr(fp, 'fileno'):
434 if util.safehasattr(fp, 'fileno'):
429 return os.fdopen(os.dup(fp.fileno()), mode)
435 return os.fdopen(os.dup(fp.fileno()), mode)
430 else:
436 else:
431 # if this fp can't be duped properly, return
437 # if this fp can't be duped properly, return
432 # a dummy object that can be closed
438 # a dummy object that can be closed
433 class wrappedfileobj(object):
439 class wrappedfileobj(object):
434 noop = lambda x: None
440 noop = lambda x: None
435 def __init__(self, f):
441 def __init__(self, f):
436 self.f = f
442 self.f = f
437 def __getattr__(self, attr):
443 def __getattr__(self, attr):
438 if attr == 'close':
444 if attr == 'close':
439 return self.noop
445 return self.noop
440 else:
446 else:
441 return getattr(self.f, attr)
447 return getattr(self.f, attr)
442
448
443 return wrappedfileobj(fp)
449 return wrappedfileobj(fp)
444 if util.safehasattr(pat, 'write') and writable:
450 if util.safehasattr(pat, 'write') and writable:
445 return pat
451 return pat
446 if util.safehasattr(pat, 'read') and 'r' in mode:
452 if util.safehasattr(pat, 'read') and 'r' in mode:
447 return pat
453 return pat
448 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
454 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
449 if modemap is not None:
455 if modemap is not None:
450 mode = modemap.get(fn, mode)
456 mode = modemap.get(fn, mode)
451 if mode == 'wb':
457 if mode == 'wb':
452 modemap[fn] = 'ab'
458 modemap[fn] = 'ab'
453 return open(fn, mode)
459 return open(fn, mode)
454
460
455 def openrevlog(repo, cmd, file_, opts):
461 def openrevlog(repo, cmd, file_, opts):
456 """opens the changelog, manifest, a filelog or a given revlog"""
462 """opens the changelog, manifest, a filelog or a given revlog"""
457 cl = opts['changelog']
463 cl = opts['changelog']
458 mf = opts['manifest']
464 mf = opts['manifest']
459 dir = opts['dir']
465 dir = opts['dir']
460 msg = None
466 msg = None
461 if cl and mf:
467 if cl and mf:
462 msg = _('cannot specify --changelog and --manifest at the same time')
468 msg = _('cannot specify --changelog and --manifest at the same time')
463 elif cl and dir:
469 elif cl and dir:
464 msg = _('cannot specify --changelog and --dir at the same time')
470 msg = _('cannot specify --changelog and --dir at the same time')
465 elif cl or mf:
471 elif cl or mf:
466 if file_:
472 if file_:
467 msg = _('cannot specify filename with --changelog or --manifest')
473 msg = _('cannot specify filename with --changelog or --manifest')
468 elif not repo:
474 elif not repo:
469 msg = _('cannot specify --changelog or --manifest or --dir '
475 msg = _('cannot specify --changelog or --manifest or --dir '
470 'without a repository')
476 'without a repository')
471 if msg:
477 if msg:
472 raise util.Abort(msg)
478 raise util.Abort(msg)
473
479
474 r = None
480 r = None
475 if repo:
481 if repo:
476 if cl:
482 if cl:
477 r = repo.unfiltered().changelog
483 r = repo.unfiltered().changelog
478 elif dir:
484 elif dir:
479 if 'treemanifest' not in repo.requirements:
485 if 'treemanifest' not in repo.requirements:
480 raise util.Abort(_("--dir can only be used on repos with "
486 raise util.Abort(_("--dir can only be used on repos with "
481 "treemanifest enabled"))
487 "treemanifest enabled"))
482 dirlog = repo.dirlog(file_)
488 dirlog = repo.dirlog(file_)
483 if len(dirlog):
489 if len(dirlog):
484 r = dirlog
490 r = dirlog
485 elif mf:
491 elif mf:
486 r = repo.manifest
492 r = repo.manifest
487 elif file_:
493 elif file_:
488 filelog = repo.file(file_)
494 filelog = repo.file(file_)
489 if len(filelog):
495 if len(filelog):
490 r = filelog
496 r = filelog
491 if not r:
497 if not r:
492 if not file_:
498 if not file_:
493 raise error.CommandError(cmd, _('invalid arguments'))
499 raise error.CommandError(cmd, _('invalid arguments'))
494 if not os.path.isfile(file_):
500 if not os.path.isfile(file_):
495 raise util.Abort(_("revlog '%s' not found") % file_)
501 raise util.Abort(_("revlog '%s' not found") % file_)
496 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
502 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
497 file_[:-2] + ".i")
503 file_[:-2] + ".i")
498 return r
504 return r
499
505
500 def copy(ui, repo, pats, opts, rename=False):
506 def copy(ui, repo, pats, opts, rename=False):
501 # called with the repo lock held
507 # called with the repo lock held
502 #
508 #
503 # hgsep => pathname that uses "/" to separate directories
509 # hgsep => pathname that uses "/" to separate directories
504 # ossep => pathname that uses os.sep to separate directories
510 # ossep => pathname that uses os.sep to separate directories
505 cwd = repo.getcwd()
511 cwd = repo.getcwd()
506 targets = {}
512 targets = {}
507 after = opts.get("after")
513 after = opts.get("after")
508 dryrun = opts.get("dry_run")
514 dryrun = opts.get("dry_run")
509 wctx = repo[None]
515 wctx = repo[None]
510
516
511 def walkpat(pat):
517 def walkpat(pat):
512 srcs = []
518 srcs = []
513 if after:
519 if after:
514 badstates = '?'
520 badstates = '?'
515 else:
521 else:
516 badstates = '?r'
522 badstates = '?r'
517 m = scmutil.match(repo[None], [pat], opts, globbed=True)
523 m = scmutil.match(repo[None], [pat], opts, globbed=True)
518 for abs in repo.walk(m):
524 for abs in repo.walk(m):
519 state = repo.dirstate[abs]
525 state = repo.dirstate[abs]
520 rel = m.rel(abs)
526 rel = m.rel(abs)
521 exact = m.exact(abs)
527 exact = m.exact(abs)
522 if state in badstates:
528 if state in badstates:
523 if exact and state == '?':
529 if exact and state == '?':
524 ui.warn(_('%s: not copying - file is not managed\n') % rel)
530 ui.warn(_('%s: not copying - file is not managed\n') % rel)
525 if exact and state == 'r':
531 if exact and state == 'r':
526 ui.warn(_('%s: not copying - file has been marked for'
532 ui.warn(_('%s: not copying - file has been marked for'
527 ' remove\n') % rel)
533 ' remove\n') % rel)
528 continue
534 continue
529 # abs: hgsep
535 # abs: hgsep
530 # rel: ossep
536 # rel: ossep
531 srcs.append((abs, rel, exact))
537 srcs.append((abs, rel, exact))
532 return srcs
538 return srcs
533
539
534 # abssrc: hgsep
540 # abssrc: hgsep
535 # relsrc: ossep
541 # relsrc: ossep
536 # otarget: ossep
542 # otarget: ossep
537 def copyfile(abssrc, relsrc, otarget, exact):
543 def copyfile(abssrc, relsrc, otarget, exact):
538 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
544 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
539 if '/' in abstarget:
545 if '/' in abstarget:
540 # We cannot normalize abstarget itself, this would prevent
546 # We cannot normalize abstarget itself, this would prevent
541 # case only renames, like a => A.
547 # case only renames, like a => A.
542 abspath, absname = abstarget.rsplit('/', 1)
548 abspath, absname = abstarget.rsplit('/', 1)
543 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
549 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
544 reltarget = repo.pathto(abstarget, cwd)
550 reltarget = repo.pathto(abstarget, cwd)
545 target = repo.wjoin(abstarget)
551 target = repo.wjoin(abstarget)
546 src = repo.wjoin(abssrc)
552 src = repo.wjoin(abssrc)
547 state = repo.dirstate[abstarget]
553 state = repo.dirstate[abstarget]
548
554
549 scmutil.checkportable(ui, abstarget)
555 scmutil.checkportable(ui, abstarget)
550
556
551 # check for collisions
557 # check for collisions
552 prevsrc = targets.get(abstarget)
558 prevsrc = targets.get(abstarget)
553 if prevsrc is not None:
559 if prevsrc is not None:
554 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
560 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
555 (reltarget, repo.pathto(abssrc, cwd),
561 (reltarget, repo.pathto(abssrc, cwd),
556 repo.pathto(prevsrc, cwd)))
562 repo.pathto(prevsrc, cwd)))
557 return
563 return
558
564
559 # check for overwrites
565 # check for overwrites
560 exists = os.path.lexists(target)
566 exists = os.path.lexists(target)
561 samefile = False
567 samefile = False
562 if exists and abssrc != abstarget:
568 if exists and abssrc != abstarget:
563 if (repo.dirstate.normalize(abssrc) ==
569 if (repo.dirstate.normalize(abssrc) ==
564 repo.dirstate.normalize(abstarget)):
570 repo.dirstate.normalize(abstarget)):
565 if not rename:
571 if not rename:
566 ui.warn(_("%s: can't copy - same file\n") % reltarget)
572 ui.warn(_("%s: can't copy - same file\n") % reltarget)
567 return
573 return
568 exists = False
574 exists = False
569 samefile = True
575 samefile = True
570
576
571 if not after and exists or after and state in 'mn':
577 if not after and exists or after and state in 'mn':
572 if not opts['force']:
578 if not opts['force']:
573 ui.warn(_('%s: not overwriting - file exists\n') %
579 ui.warn(_('%s: not overwriting - file exists\n') %
574 reltarget)
580 reltarget)
575 return
581 return
576
582
577 if after:
583 if after:
578 if not exists:
584 if not exists:
579 if rename:
585 if rename:
580 ui.warn(_('%s: not recording move - %s does not exist\n') %
586 ui.warn(_('%s: not recording move - %s does not exist\n') %
581 (relsrc, reltarget))
587 (relsrc, reltarget))
582 else:
588 else:
583 ui.warn(_('%s: not recording copy - %s does not exist\n') %
589 ui.warn(_('%s: not recording copy - %s does not exist\n') %
584 (relsrc, reltarget))
590 (relsrc, reltarget))
585 return
591 return
586 elif not dryrun:
592 elif not dryrun:
587 try:
593 try:
588 if exists:
594 if exists:
589 os.unlink(target)
595 os.unlink(target)
590 targetdir = os.path.dirname(target) or '.'
596 targetdir = os.path.dirname(target) or '.'
591 if not os.path.isdir(targetdir):
597 if not os.path.isdir(targetdir):
592 os.makedirs(targetdir)
598 os.makedirs(targetdir)
593 if samefile:
599 if samefile:
594 tmp = target + "~hgrename"
600 tmp = target + "~hgrename"
595 os.rename(src, tmp)
601 os.rename(src, tmp)
596 os.rename(tmp, target)
602 os.rename(tmp, target)
597 else:
603 else:
598 util.copyfile(src, target)
604 util.copyfile(src, target)
599 srcexists = True
605 srcexists = True
600 except IOError, inst:
606 except IOError, inst:
601 if inst.errno == errno.ENOENT:
607 if inst.errno == errno.ENOENT:
602 ui.warn(_('%s: deleted in working directory\n') % relsrc)
608 ui.warn(_('%s: deleted in working directory\n') % relsrc)
603 srcexists = False
609 srcexists = False
604 else:
610 else:
605 ui.warn(_('%s: cannot copy - %s\n') %
611 ui.warn(_('%s: cannot copy - %s\n') %
606 (relsrc, inst.strerror))
612 (relsrc, inst.strerror))
607 return True # report a failure
613 return True # report a failure
608
614
609 if ui.verbose or not exact:
615 if ui.verbose or not exact:
610 if rename:
616 if rename:
611 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
617 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
612 else:
618 else:
613 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
619 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
614
620
615 targets[abstarget] = abssrc
621 targets[abstarget] = abssrc
616
622
617 # fix up dirstate
623 # fix up dirstate
618 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
624 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
619 dryrun=dryrun, cwd=cwd)
625 dryrun=dryrun, cwd=cwd)
620 if rename and not dryrun:
626 if rename and not dryrun:
621 if not after and srcexists and not samefile:
627 if not after and srcexists and not samefile:
622 util.unlinkpath(repo.wjoin(abssrc))
628 util.unlinkpath(repo.wjoin(abssrc))
623 wctx.forget([abssrc])
629 wctx.forget([abssrc])
624
630
625 # pat: ossep
631 # pat: ossep
626 # dest ossep
632 # dest ossep
627 # srcs: list of (hgsep, hgsep, ossep, bool)
633 # srcs: list of (hgsep, hgsep, ossep, bool)
628 # return: function that takes hgsep and returns ossep
634 # return: function that takes hgsep and returns ossep
629 def targetpathfn(pat, dest, srcs):
635 def targetpathfn(pat, dest, srcs):
630 if os.path.isdir(pat):
636 if os.path.isdir(pat):
631 abspfx = pathutil.canonpath(repo.root, cwd, pat)
637 abspfx = pathutil.canonpath(repo.root, cwd, pat)
632 abspfx = util.localpath(abspfx)
638 abspfx = util.localpath(abspfx)
633 if destdirexists:
639 if destdirexists:
634 striplen = len(os.path.split(abspfx)[0])
640 striplen = len(os.path.split(abspfx)[0])
635 else:
641 else:
636 striplen = len(abspfx)
642 striplen = len(abspfx)
637 if striplen:
643 if striplen:
638 striplen += len(os.sep)
644 striplen += len(os.sep)
639 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
645 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
640 elif destdirexists:
646 elif destdirexists:
641 res = lambda p: os.path.join(dest,
647 res = lambda p: os.path.join(dest,
642 os.path.basename(util.localpath(p)))
648 os.path.basename(util.localpath(p)))
643 else:
649 else:
644 res = lambda p: dest
650 res = lambda p: dest
645 return res
651 return res
646
652
647 # pat: ossep
653 # pat: ossep
648 # dest ossep
654 # dest ossep
649 # srcs: list of (hgsep, hgsep, ossep, bool)
655 # srcs: list of (hgsep, hgsep, ossep, bool)
650 # return: function that takes hgsep and returns ossep
656 # return: function that takes hgsep and returns ossep
651 def targetpathafterfn(pat, dest, srcs):
657 def targetpathafterfn(pat, dest, srcs):
652 if matchmod.patkind(pat):
658 if matchmod.patkind(pat):
653 # a mercurial pattern
659 # a mercurial pattern
654 res = lambda p: os.path.join(dest,
660 res = lambda p: os.path.join(dest,
655 os.path.basename(util.localpath(p)))
661 os.path.basename(util.localpath(p)))
656 else:
662 else:
657 abspfx = pathutil.canonpath(repo.root, cwd, pat)
663 abspfx = pathutil.canonpath(repo.root, cwd, pat)
658 if len(abspfx) < len(srcs[0][0]):
664 if len(abspfx) < len(srcs[0][0]):
659 # A directory. Either the target path contains the last
665 # A directory. Either the target path contains the last
660 # component of the source path or it does not.
666 # component of the source path or it does not.
661 def evalpath(striplen):
667 def evalpath(striplen):
662 score = 0
668 score = 0
663 for s in srcs:
669 for s in srcs:
664 t = os.path.join(dest, util.localpath(s[0])[striplen:])
670 t = os.path.join(dest, util.localpath(s[0])[striplen:])
665 if os.path.lexists(t):
671 if os.path.lexists(t):
666 score += 1
672 score += 1
667 return score
673 return score
668
674
669 abspfx = util.localpath(abspfx)
675 abspfx = util.localpath(abspfx)
670 striplen = len(abspfx)
676 striplen = len(abspfx)
671 if striplen:
677 if striplen:
672 striplen += len(os.sep)
678 striplen += len(os.sep)
673 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
679 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
674 score = evalpath(striplen)
680 score = evalpath(striplen)
675 striplen1 = len(os.path.split(abspfx)[0])
681 striplen1 = len(os.path.split(abspfx)[0])
676 if striplen1:
682 if striplen1:
677 striplen1 += len(os.sep)
683 striplen1 += len(os.sep)
678 if evalpath(striplen1) > score:
684 if evalpath(striplen1) > score:
679 striplen = striplen1
685 striplen = striplen1
680 res = lambda p: os.path.join(dest,
686 res = lambda p: os.path.join(dest,
681 util.localpath(p)[striplen:])
687 util.localpath(p)[striplen:])
682 else:
688 else:
683 # a file
689 # a file
684 if destdirexists:
690 if destdirexists:
685 res = lambda p: os.path.join(dest,
691 res = lambda p: os.path.join(dest,
686 os.path.basename(util.localpath(p)))
692 os.path.basename(util.localpath(p)))
687 else:
693 else:
688 res = lambda p: dest
694 res = lambda p: dest
689 return res
695 return res
690
696
691 pats = scmutil.expandpats(pats)
697 pats = scmutil.expandpats(pats)
692 if not pats:
698 if not pats:
693 raise util.Abort(_('no source or destination specified'))
699 raise util.Abort(_('no source or destination specified'))
694 if len(pats) == 1:
700 if len(pats) == 1:
695 raise util.Abort(_('no destination specified'))
701 raise util.Abort(_('no destination specified'))
696 dest = pats.pop()
702 dest = pats.pop()
697 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
703 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
698 if not destdirexists:
704 if not destdirexists:
699 if len(pats) > 1 or matchmod.patkind(pats[0]):
705 if len(pats) > 1 or matchmod.patkind(pats[0]):
700 raise util.Abort(_('with multiple sources, destination must be an '
706 raise util.Abort(_('with multiple sources, destination must be an '
701 'existing directory'))
707 'existing directory'))
702 if util.endswithsep(dest):
708 if util.endswithsep(dest):
703 raise util.Abort(_('destination %s is not a directory') % dest)
709 raise util.Abort(_('destination %s is not a directory') % dest)
704
710
705 tfn = targetpathfn
711 tfn = targetpathfn
706 if after:
712 if after:
707 tfn = targetpathafterfn
713 tfn = targetpathafterfn
708 copylist = []
714 copylist = []
709 for pat in pats:
715 for pat in pats:
710 srcs = walkpat(pat)
716 srcs = walkpat(pat)
711 if not srcs:
717 if not srcs:
712 continue
718 continue
713 copylist.append((tfn(pat, dest, srcs), srcs))
719 copylist.append((tfn(pat, dest, srcs), srcs))
714 if not copylist:
720 if not copylist:
715 raise util.Abort(_('no files to copy'))
721 raise util.Abort(_('no files to copy'))
716
722
717 errors = 0
723 errors = 0
718 for targetpath, srcs in copylist:
724 for targetpath, srcs in copylist:
719 for abssrc, relsrc, exact in srcs:
725 for abssrc, relsrc, exact in srcs:
720 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
726 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
721 errors += 1
727 errors += 1
722
728
723 if errors:
729 if errors:
724 ui.warn(_('(consider using --after)\n'))
730 ui.warn(_('(consider using --after)\n'))
725
731
726 return errors != 0
732 return errors != 0
727
733
728 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
734 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
729 runargs=None, appendpid=False):
735 runargs=None, appendpid=False):
730 '''Run a command as a service.'''
736 '''Run a command as a service.'''
731
737
732 def writepid(pid):
738 def writepid(pid):
733 if opts['pid_file']:
739 if opts['pid_file']:
734 if appendpid:
740 if appendpid:
735 mode = 'a'
741 mode = 'a'
736 else:
742 else:
737 mode = 'w'
743 mode = 'w'
738 fp = open(opts['pid_file'], mode)
744 fp = open(opts['pid_file'], mode)
739 fp.write(str(pid) + '\n')
745 fp.write(str(pid) + '\n')
740 fp.close()
746 fp.close()
741
747
742 if opts['daemon'] and not opts['daemon_pipefds']:
748 if opts['daemon'] and not opts['daemon_pipefds']:
743 # Signal child process startup with file removal
749 # Signal child process startup with file removal
744 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
750 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
745 os.close(lockfd)
751 os.close(lockfd)
746 try:
752 try:
747 if not runargs:
753 if not runargs:
748 runargs = util.hgcmd() + sys.argv[1:]
754 runargs = util.hgcmd() + sys.argv[1:]
749 runargs.append('--daemon-pipefds=%s' % lockpath)
755 runargs.append('--daemon-pipefds=%s' % lockpath)
750 # Don't pass --cwd to the child process, because we've already
756 # Don't pass --cwd to the child process, because we've already
751 # changed directory.
757 # changed directory.
752 for i in xrange(1, len(runargs)):
758 for i in xrange(1, len(runargs)):
753 if runargs[i].startswith('--cwd='):
759 if runargs[i].startswith('--cwd='):
754 del runargs[i]
760 del runargs[i]
755 break
761 break
756 elif runargs[i].startswith('--cwd'):
762 elif runargs[i].startswith('--cwd'):
757 del runargs[i:i + 2]
763 del runargs[i:i + 2]
758 break
764 break
759 def condfn():
765 def condfn():
760 return not os.path.exists(lockpath)
766 return not os.path.exists(lockpath)
761 pid = util.rundetached(runargs, condfn)
767 pid = util.rundetached(runargs, condfn)
762 if pid < 0:
768 if pid < 0:
763 raise util.Abort(_('child process failed to start'))
769 raise util.Abort(_('child process failed to start'))
764 writepid(pid)
770 writepid(pid)
765 finally:
771 finally:
766 try:
772 try:
767 os.unlink(lockpath)
773 os.unlink(lockpath)
768 except OSError, e:
774 except OSError, e:
769 if e.errno != errno.ENOENT:
775 if e.errno != errno.ENOENT:
770 raise
776 raise
771 if parentfn:
777 if parentfn:
772 return parentfn(pid)
778 return parentfn(pid)
773 else:
779 else:
774 return
780 return
775
781
776 if initfn:
782 if initfn:
777 initfn()
783 initfn()
778
784
779 if not opts['daemon']:
785 if not opts['daemon']:
780 writepid(os.getpid())
786 writepid(os.getpid())
781
787
782 if opts['daemon_pipefds']:
788 if opts['daemon_pipefds']:
783 lockpath = opts['daemon_pipefds']
789 lockpath = opts['daemon_pipefds']
784 try:
790 try:
785 os.setsid()
791 os.setsid()
786 except AttributeError:
792 except AttributeError:
787 pass
793 pass
788 os.unlink(lockpath)
794 os.unlink(lockpath)
789 util.hidewindow()
795 util.hidewindow()
790 sys.stdout.flush()
796 sys.stdout.flush()
791 sys.stderr.flush()
797 sys.stderr.flush()
792
798
793 nullfd = os.open(os.devnull, os.O_RDWR)
799 nullfd = os.open(os.devnull, os.O_RDWR)
794 logfilefd = nullfd
800 logfilefd = nullfd
795 if logfile:
801 if logfile:
796 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
802 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
797 os.dup2(nullfd, 0)
803 os.dup2(nullfd, 0)
798 os.dup2(logfilefd, 1)
804 os.dup2(logfilefd, 1)
799 os.dup2(logfilefd, 2)
805 os.dup2(logfilefd, 2)
800 if nullfd not in (0, 1, 2):
806 if nullfd not in (0, 1, 2):
801 os.close(nullfd)
807 os.close(nullfd)
802 if logfile and logfilefd not in (0, 1, 2):
808 if logfile and logfilefd not in (0, 1, 2):
803 os.close(logfilefd)
809 os.close(logfilefd)
804
810
805 if runfn:
811 if runfn:
806 return runfn()
812 return runfn()
807
813
808 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
814 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
809 """Utility function used by commands.import to import a single patch
815 """Utility function used by commands.import to import a single patch
810
816
811 This function is explicitly defined here to help the evolve extension to
817 This function is explicitly defined here to help the evolve extension to
812 wrap this part of the import logic.
818 wrap this part of the import logic.
813
819
814 The API is currently a bit ugly because it is a simple code translation from
820 The API is currently a bit ugly because it is a simple code translation from
815 the import command. Feel free to make it better.
821 the import command. Feel free to make it better.
816
822
817 :hunk: a patch (as a binary string)
823 :hunk: a patch (as a binary string)
818 :parents: nodes that will be parent of the created commit
824 :parents: nodes that will be parent of the created commit
819 :opts: the full dict of options passed to the import command
825 :opts: the full dict of options passed to the import command
820 :msgs: list to save commit message to.
826 :msgs: list to save commit message to.
821 (used in case we need to save it when failing)
827 (used in case we need to save it when failing)
822 :updatefunc: a function that updates a repo to a given node
828 :updatefunc: a function that updates a repo to a given node
823 updatefunc(<repo>, <node>)
829 updatefunc(<repo>, <node>)
824 """
830 """
825 tmpname, message, user, date, branch, nodeid, p1, p2 = \
831 tmpname, message, user, date, branch, nodeid, p1, p2 = \
826 patch.extract(ui, hunk)
832 patch.extract(ui, hunk)
827
833
828 update = not opts.get('bypass')
834 update = not opts.get('bypass')
829 strip = opts["strip"]
835 strip = opts["strip"]
830 prefix = opts["prefix"]
836 prefix = opts["prefix"]
831 sim = float(opts.get('similarity') or 0)
837 sim = float(opts.get('similarity') or 0)
832 if not tmpname:
838 if not tmpname:
833 return (None, None, False)
839 return (None, None, False)
834 msg = _('applied to working directory')
840 msg = _('applied to working directory')
835
841
836 rejects = False
842 rejects = False
837 dsguard = None
843 dsguard = None
838
844
839 try:
845 try:
840 cmdline_message = logmessage(ui, opts)
846 cmdline_message = logmessage(ui, opts)
841 if cmdline_message:
847 if cmdline_message:
842 # pickup the cmdline msg
848 # pickup the cmdline msg
843 message = cmdline_message
849 message = cmdline_message
844 elif message:
850 elif message:
845 # pickup the patch msg
851 # pickup the patch msg
846 message = message.strip()
852 message = message.strip()
847 else:
853 else:
848 # launch the editor
854 # launch the editor
849 message = None
855 message = None
850 ui.debug('message:\n%s\n' % message)
856 ui.debug('message:\n%s\n' % message)
851
857
852 if len(parents) == 1:
858 if len(parents) == 1:
853 parents.append(repo[nullid])
859 parents.append(repo[nullid])
854 if opts.get('exact'):
860 if opts.get('exact'):
855 if not nodeid or not p1:
861 if not nodeid or not p1:
856 raise util.Abort(_('not a Mercurial patch'))
862 raise util.Abort(_('not a Mercurial patch'))
857 p1 = repo[p1]
863 p1 = repo[p1]
858 p2 = repo[p2 or nullid]
864 p2 = repo[p2 or nullid]
859 elif p2:
865 elif p2:
860 try:
866 try:
861 p1 = repo[p1]
867 p1 = repo[p1]
862 p2 = repo[p2]
868 p2 = repo[p2]
863 # Without any options, consider p2 only if the
869 # Without any options, consider p2 only if the
864 # patch is being applied on top of the recorded
870 # patch is being applied on top of the recorded
865 # first parent.
871 # first parent.
866 if p1 != parents[0]:
872 if p1 != parents[0]:
867 p1 = parents[0]
873 p1 = parents[0]
868 p2 = repo[nullid]
874 p2 = repo[nullid]
869 except error.RepoError:
875 except error.RepoError:
870 p1, p2 = parents
876 p1, p2 = parents
871 if p2.node() == nullid:
877 if p2.node() == nullid:
872 ui.warn(_("warning: import the patch as a normal revision\n"
878 ui.warn(_("warning: import the patch as a normal revision\n"
873 "(use --exact to import the patch as a merge)\n"))
879 "(use --exact to import the patch as a merge)\n"))
874 else:
880 else:
875 p1, p2 = parents
881 p1, p2 = parents
876
882
877 n = None
883 n = None
878 if update:
884 if update:
879 dsguard = dirstateguard(repo, 'tryimportone')
885 dsguard = dirstateguard(repo, 'tryimportone')
880 if p1 != parents[0]:
886 if p1 != parents[0]:
881 updatefunc(repo, p1.node())
887 updatefunc(repo, p1.node())
882 if p2 != parents[1]:
888 if p2 != parents[1]:
883 repo.setparents(p1.node(), p2.node())
889 repo.setparents(p1.node(), p2.node())
884
890
885 if opts.get('exact') or opts.get('import_branch'):
891 if opts.get('exact') or opts.get('import_branch'):
886 repo.dirstate.setbranch(branch or 'default')
892 repo.dirstate.setbranch(branch or 'default')
887
893
888 partial = opts.get('partial', False)
894 partial = opts.get('partial', False)
889 files = set()
895 files = set()
890 try:
896 try:
891 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
897 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
892 files=files, eolmode=None, similarity=sim / 100.0)
898 files=files, eolmode=None, similarity=sim / 100.0)
893 except patch.PatchError, e:
899 except patch.PatchError, e:
894 if not partial:
900 if not partial:
895 raise util.Abort(str(e))
901 raise util.Abort(str(e))
896 if partial:
902 if partial:
897 rejects = True
903 rejects = True
898
904
899 files = list(files)
905 files = list(files)
900 if opts.get('no_commit'):
906 if opts.get('no_commit'):
901 if message:
907 if message:
902 msgs.append(message)
908 msgs.append(message)
903 else:
909 else:
904 if opts.get('exact') or p2:
910 if opts.get('exact') or p2:
905 # If you got here, you either use --force and know what
911 # If you got here, you either use --force and know what
906 # you are doing or used --exact or a merge patch while
912 # you are doing or used --exact or a merge patch while
907 # being updated to its first parent.
913 # being updated to its first parent.
908 m = None
914 m = None
909 else:
915 else:
910 m = scmutil.matchfiles(repo, files or [])
916 m = scmutil.matchfiles(repo, files or [])
911 editform = mergeeditform(repo[None], 'import.normal')
917 editform = mergeeditform(repo[None], 'import.normal')
912 if opts.get('exact'):
918 if opts.get('exact'):
913 editor = None
919 editor = None
914 else:
920 else:
915 editor = getcommiteditor(editform=editform, **opts)
921 editor = getcommiteditor(editform=editform, **opts)
916 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
922 allowemptyback = repo.ui.backupconfig('ui', 'allowemptycommit')
917 try:
923 try:
918 if partial:
924 if partial:
919 repo.ui.setconfig('ui', 'allowemptycommit', True)
925 repo.ui.setconfig('ui', 'allowemptycommit', True)
920 n = repo.commit(message, opts.get('user') or user,
926 n = repo.commit(message, opts.get('user') or user,
921 opts.get('date') or date, match=m,
927 opts.get('date') or date, match=m,
922 editor=editor)
928 editor=editor)
923 finally:
929 finally:
924 repo.ui.restoreconfig(allowemptyback)
930 repo.ui.restoreconfig(allowemptyback)
925 dsguard.close()
931 dsguard.close()
926 else:
932 else:
927 if opts.get('exact') or opts.get('import_branch'):
933 if opts.get('exact') or opts.get('import_branch'):
928 branch = branch or 'default'
934 branch = branch or 'default'
929 else:
935 else:
930 branch = p1.branch()
936 branch = p1.branch()
931 store = patch.filestore()
937 store = patch.filestore()
932 try:
938 try:
933 files = set()
939 files = set()
934 try:
940 try:
935 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
941 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
936 files, eolmode=None)
942 files, eolmode=None)
937 except patch.PatchError, e:
943 except patch.PatchError, e:
938 raise util.Abort(str(e))
944 raise util.Abort(str(e))
939 if opts.get('exact'):
945 if opts.get('exact'):
940 editor = None
946 editor = None
941 else:
947 else:
942 editor = getcommiteditor(editform='import.bypass')
948 editor = getcommiteditor(editform='import.bypass')
943 memctx = context.makememctx(repo, (p1.node(), p2.node()),
949 memctx = context.makememctx(repo, (p1.node(), p2.node()),
944 message,
950 message,
945 opts.get('user') or user,
951 opts.get('user') or user,
946 opts.get('date') or date,
952 opts.get('date') or date,
947 branch, files, store,
953 branch, files, store,
948 editor=editor)
954 editor=editor)
949 n = memctx.commit()
955 n = memctx.commit()
950 finally:
956 finally:
951 store.close()
957 store.close()
952 if opts.get('exact') and opts.get('no_commit'):
958 if opts.get('exact') and opts.get('no_commit'):
953 # --exact with --no-commit is still useful in that it does merge
959 # --exact with --no-commit is still useful in that it does merge
954 # and branch bits
960 # and branch bits
955 ui.warn(_("warning: can't check exact import with --no-commit\n"))
961 ui.warn(_("warning: can't check exact import with --no-commit\n"))
956 elif opts.get('exact') and hex(n) != nodeid:
962 elif opts.get('exact') and hex(n) != nodeid:
957 raise util.Abort(_('patch is damaged or loses information'))
963 raise util.Abort(_('patch is damaged or loses information'))
958 if n:
964 if n:
959 # i18n: refers to a short changeset id
965 # i18n: refers to a short changeset id
960 msg = _('created %s') % short(n)
966 msg = _('created %s') % short(n)
961 return (msg, n, rejects)
967 return (msg, n, rejects)
962 finally:
968 finally:
963 lockmod.release(dsguard)
969 lockmod.release(dsguard)
964 os.unlink(tmpname)
970 os.unlink(tmpname)
965
971
966 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
972 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
967 opts=None):
973 opts=None):
968 '''export changesets as hg patches.'''
974 '''export changesets as hg patches.'''
969
975
970 total = len(revs)
976 total = len(revs)
971 revwidth = max([len(str(rev)) for rev in revs])
977 revwidth = max([len(str(rev)) for rev in revs])
972 filemode = {}
978 filemode = {}
973
979
974 def single(rev, seqno, fp):
980 def single(rev, seqno, fp):
975 ctx = repo[rev]
981 ctx = repo[rev]
976 node = ctx.node()
982 node = ctx.node()
977 parents = [p.node() for p in ctx.parents() if p]
983 parents = [p.node() for p in ctx.parents() if p]
978 branch = ctx.branch()
984 branch = ctx.branch()
979 if switch_parent:
985 if switch_parent:
980 parents.reverse()
986 parents.reverse()
981
987
982 if parents:
988 if parents:
983 prev = parents[0]
989 prev = parents[0]
984 else:
990 else:
985 prev = nullid
991 prev = nullid
986
992
987 shouldclose = False
993 shouldclose = False
988 if not fp and len(template) > 0:
994 if not fp and len(template) > 0:
989 desc_lines = ctx.description().rstrip().split('\n')
995 desc_lines = ctx.description().rstrip().split('\n')
990 desc = desc_lines[0] #Commit always has a first line.
996 desc = desc_lines[0] #Commit always has a first line.
991 fp = makefileobj(repo, template, node, desc=desc, total=total,
997 fp = makefileobj(repo, template, node, desc=desc, total=total,
992 seqno=seqno, revwidth=revwidth, mode='wb',
998 seqno=seqno, revwidth=revwidth, mode='wb',
993 modemap=filemode)
999 modemap=filemode)
994 if fp != template:
1000 if fp != template:
995 shouldclose = True
1001 shouldclose = True
996 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
1002 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
997 repo.ui.note("%s\n" % fp.name)
1003 repo.ui.note("%s\n" % fp.name)
998
1004
999 if not fp:
1005 if not fp:
1000 write = repo.ui.write
1006 write = repo.ui.write
1001 else:
1007 else:
1002 def write(s, **kw):
1008 def write(s, **kw):
1003 fp.write(s)
1009 fp.write(s)
1004
1010
1005 write("# HG changeset patch\n")
1011 write("# HG changeset patch\n")
1006 write("# User %s\n" % ctx.user())
1012 write("# User %s\n" % ctx.user())
1007 write("# Date %d %d\n" % ctx.date())
1013 write("# Date %d %d\n" % ctx.date())
1008 write("# %s\n" % util.datestr(ctx.date()))
1014 write("# %s\n" % util.datestr(ctx.date()))
1009 if branch and branch != 'default':
1015 if branch and branch != 'default':
1010 write("# Branch %s\n" % branch)
1016 write("# Branch %s\n" % branch)
1011 write("# Node ID %s\n" % hex(node))
1017 write("# Node ID %s\n" % hex(node))
1012 write("# Parent %s\n" % hex(prev))
1018 write("# Parent %s\n" % hex(prev))
1013 if len(parents) > 1:
1019 if len(parents) > 1:
1014 write("# Parent %s\n" % hex(parents[1]))
1020 write("# Parent %s\n" % hex(parents[1]))
1015 write(ctx.description().rstrip())
1021 write(ctx.description().rstrip())
1016 write("\n\n")
1022 write("\n\n")
1017
1023
1018 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
1024 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
1019 write(chunk, label=label)
1025 write(chunk, label=label)
1020
1026
1021 if shouldclose:
1027 if shouldclose:
1022 fp.close()
1028 fp.close()
1023
1029
1024 for seqno, rev in enumerate(revs):
1030 for seqno, rev in enumerate(revs):
1025 single(rev, seqno + 1, fp)
1031 single(rev, seqno + 1, fp)
1026
1032
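# Illustrative sketch (exposition only, assuming an existing localrepository
# `repo`): a minimal way to drive export() above.  With fp set, every selected
# revision is written to that one stream instead of a per-revision
# hg-%h.patch file.
def _demo_export(repo):
    buf = cStringIO.StringIO()
    export(repo, [repo['tip'].rev()], fp=buf)
    return buf.getvalue()   # "# HG changeset patch" header lines plus the diff
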
1027 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1033 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1028 changes=None, stat=False, fp=None, prefix='',
1034 changes=None, stat=False, fp=None, prefix='',
1029 root='', listsubrepos=False):
1035 root='', listsubrepos=False):
1030 '''show diff or diffstat.'''
1036 '''show diff or diffstat.'''
1031 if fp is None:
1037 if fp is None:
1032 write = ui.write
1038 write = ui.write
1033 else:
1039 else:
1034 def write(s, **kw):
1040 def write(s, **kw):
1035 fp.write(s)
1041 fp.write(s)
1036
1042
1037 if root:
1043 if root:
1038 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1044 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1039 else:
1045 else:
1040 relroot = ''
1046 relroot = ''
1041 if relroot != '':
1047 if relroot != '':
1042 # XXX relative roots currently don't work if the root is within a
1048 # XXX relative roots currently don't work if the root is within a
1043 # subrepo
1049 # subrepo
1044 uirelroot = match.uipath(relroot)
1050 uirelroot = match.uipath(relroot)
1045 relroot += '/'
1051 relroot += '/'
1046 for matchroot in match.files():
1052 for matchroot in match.files():
1047 if not matchroot.startswith(relroot):
1053 if not matchroot.startswith(relroot):
1048 ui.warn(_('warning: %s not inside relative root %s\n') % (
1054 ui.warn(_('warning: %s not inside relative root %s\n') % (
1049 match.uipath(matchroot), uirelroot))
1055 match.uipath(matchroot), uirelroot))
1050
1056
1051 if stat:
1057 if stat:
1052 diffopts = diffopts.copy(context=0)
1058 diffopts = diffopts.copy(context=0)
1053 width = 80
1059 width = 80
1054 if not ui.plain():
1060 if not ui.plain():
1055 width = ui.termwidth()
1061 width = ui.termwidth()
1056 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1062 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1057 prefix=prefix, relroot=relroot)
1063 prefix=prefix, relroot=relroot)
1058 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1064 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1059 width=width,
1065 width=width,
1060 git=diffopts.git):
1066 git=diffopts.git):
1061 write(chunk, label=label)
1067 write(chunk, label=label)
1062 else:
1068 else:
1063 for chunk, label in patch.diffui(repo, node1, node2, match,
1069 for chunk, label in patch.diffui(repo, node1, node2, match,
1064 changes, diffopts, prefix=prefix,
1070 changes, diffopts, prefix=prefix,
1065 relroot=relroot):
1071 relroot=relroot):
1066 write(chunk, label=label)
1072 write(chunk, label=label)
1067
1073
1068 if listsubrepos:
1074 if listsubrepos:
1069 ctx1 = repo[node1]
1075 ctx1 = repo[node1]
1070 ctx2 = repo[node2]
1076 ctx2 = repo[node2]
1071 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1077 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1072 tempnode2 = node2
1078 tempnode2 = node2
1073 try:
1079 try:
1074 if node2 is not None:
1080 if node2 is not None:
1075 tempnode2 = ctx2.substate[subpath][1]
1081 tempnode2 = ctx2.substate[subpath][1]
1076 except KeyError:
1082 except KeyError:
1077 # A subrepo that existed in node1 was deleted between node1 and
1083 # A subrepo that existed in node1 was deleted between node1 and
1078 # node2 (inclusive). Thus, ctx2's substate won't contain that
1084 # node2 (inclusive). Thus, ctx2's substate won't contain that
1079 # subpath. The best we can do is to ignore it.
1085 # subpath. The best we can do is to ignore it.
1080 tempnode2 = None
1086 tempnode2 = None
1081 submatch = matchmod.narrowmatcher(subpath, match)
1087 submatch = matchmod.narrowmatcher(subpath, match)
1082 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1088 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1083 stat=stat, fp=fp, prefix=prefix)
1089 stat=stat, fp=fp, prefix=prefix)
1084
1090
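# Illustrative sketch (exposition only, assuming `ui` and `repo` objects as a
# command would have them): rendering a diffstat of the working directory
# against its first parent with diffordiffstat() above.
def _demo_diffordiffstat(ui, repo):
    m = scmutil.matchall(repo)
    diffopts = patch.diffallopts(ui)
    # node2=None means "the working directory"; stat=True selects diffstat
    diffordiffstat(ui, repo, diffopts, repo['.'].node(), None, m, stat=True)
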
1085 class changeset_printer(object):
1091 class changeset_printer(object):
1086 '''show changeset information when templating not requested.'''
1092 '''show changeset information when templating not requested.'''
1087
1093
1088 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1094 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1089 self.ui = ui
1095 self.ui = ui
1090 self.repo = repo
1096 self.repo = repo
1091 self.buffered = buffered
1097 self.buffered = buffered
1092 self.matchfn = matchfn
1098 self.matchfn = matchfn
1093 self.diffopts = diffopts
1099 self.diffopts = diffopts
1094 self.header = {}
1100 self.header = {}
1095 self.hunk = {}
1101 self.hunk = {}
1096 self.lastheader = None
1102 self.lastheader = None
1097 self.footer = None
1103 self.footer = None
1098
1104
1099 def flush(self, rev):
1105 def flush(self, rev):
1100 if rev in self.header:
1106 if rev in self.header:
1101 h = self.header[rev]
1107 h = self.header[rev]
1102 if h != self.lastheader:
1108 if h != self.lastheader:
1103 self.lastheader = h
1109 self.lastheader = h
1104 self.ui.write(h)
1110 self.ui.write(h)
1105 del self.header[rev]
1111 del self.header[rev]
1106 if rev in self.hunk:
1112 if rev in self.hunk:
1107 self.ui.write(self.hunk[rev])
1113 self.ui.write(self.hunk[rev])
1108 del self.hunk[rev]
1114 del self.hunk[rev]
1109 return 1
1115 return 1
1110 return 0
1116 return 0
1111
1117
1112 def close(self):
1118 def close(self):
1113 if self.footer:
1119 if self.footer:
1114 self.ui.write(self.footer)
1120 self.ui.write(self.footer)
1115
1121
1116 def show(self, ctx, copies=None, matchfn=None, **props):
1122 def show(self, ctx, copies=None, matchfn=None, **props):
1117 if self.buffered:
1123 if self.buffered:
1118 self.ui.pushbuffer()
1124 self.ui.pushbuffer()
1119 self._show(ctx, copies, matchfn, props)
1125 self._show(ctx, copies, matchfn, props)
1120 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1126 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1121 else:
1127 else:
1122 self._show(ctx, copies, matchfn, props)
1128 self._show(ctx, copies, matchfn, props)
1123
1129
1124 def _show(self, ctx, copies, matchfn, props):
1130 def _show(self, ctx, copies, matchfn, props):
1125 '''show a single changeset or file revision'''
1131 '''show a single changeset or file revision'''
1126 changenode = ctx.node()
1132 changenode = ctx.node()
1127 rev = ctx.rev()
1133 rev = ctx.rev()
1128 if self.ui.debugflag:
1134 if self.ui.debugflag:
1129 hexfunc = hex
1135 hexfunc = hex
1130 else:
1136 else:
1131 hexfunc = short
1137 hexfunc = short
1132 if rev is None:
1138 if rev is None:
1133 pctx = ctx.p1()
1139 pctx = ctx.p1()
1134 revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
1140 revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
1135 else:
1141 else:
1136 revnode = (rev, hexfunc(changenode))
1142 revnode = (rev, hexfunc(changenode))
1137
1143
1138 if self.ui.quiet:
1144 if self.ui.quiet:
1139 self.ui.write("%d:%s\n" % revnode, label='log.node')
1145 self.ui.write("%d:%s\n" % revnode, label='log.node')
1140 return
1146 return
1141
1147
1142 date = util.datestr(ctx.date())
1148 date = util.datestr(ctx.date())
1143
1149
1144 # i18n: column positioning for "hg log"
1150 # i18n: column positioning for "hg log"
1145 self.ui.write(_("changeset: %d:%s\n") % revnode,
1151 self.ui.write(_("changeset: %d:%s\n") % revnode,
1146 label='log.changeset changeset.%s' % ctx.phasestr())
1152 label='log.changeset changeset.%s' % ctx.phasestr())
1147
1153
1148 # branches are shown before any other names for backwards
1154 # branches are shown before any other names for backwards
1149 # compatibility
1155 # compatibility
1150 branch = ctx.branch()
1156 branch = ctx.branch()
1151 # don't show the default branch name
1157 # don't show the default branch name
1152 if branch != 'default':
1158 if branch != 'default':
1153 # i18n: column positioning for "hg log"
1159 # i18n: column positioning for "hg log"
1154 self.ui.write(_("branch: %s\n") % branch,
1160 self.ui.write(_("branch: %s\n") % branch,
1155 label='log.branch')
1161 label='log.branch')
1156
1162
1157 for name, ns in self.repo.names.iteritems():
1163 for name, ns in self.repo.names.iteritems():
1158 # the 'branches' namespace has special logic handled above, so we just
1164 # the 'branches' namespace has special logic handled above, so we just
1159 # skip it here
1165 # skip it here
1160 if name == 'branches':
1166 if name == 'branches':
1161 continue
1167 continue
1162 # we will use the templatename as the color name since those two
1168 # we will use the templatename as the color name since those two
1163 # should be the same
1169 # should be the same
1164 for name in ns.names(self.repo, changenode):
1170 for name in ns.names(self.repo, changenode):
1165 self.ui.write(ns.logfmt % name,
1171 self.ui.write(ns.logfmt % name,
1166 label='log.%s' % ns.colorname)
1172 label='log.%s' % ns.colorname)
1167 if self.ui.debugflag:
1173 if self.ui.debugflag:
1168 # i18n: column positioning for "hg log"
1174 # i18n: column positioning for "hg log"
1169 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1175 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1170 label='log.phase')
1176 label='log.phase')
1171 for pctx in self._meaningful_parentrevs(ctx):
1177 for pctx in self._meaningful_parentrevs(ctx):
1172 label = 'log.parent changeset.%s' % pctx.phasestr()
1178 label = 'log.parent changeset.%s' % pctx.phasestr()
1173 # i18n: column positioning for "hg log"
1179 # i18n: column positioning for "hg log"
1174 self.ui.write(_("parent: %d:%s\n")
1180 self.ui.write(_("parent: %d:%s\n")
1175 % (pctx.rev(), hexfunc(pctx.node())),
1181 % (pctx.rev(), hexfunc(pctx.node())),
1176 label=label)
1182 label=label)
1177
1183
1178 if self.ui.debugflag and rev is not None:
1184 if self.ui.debugflag and rev is not None:
1179 mnode = ctx.manifestnode()
1185 mnode = ctx.manifestnode()
1180 # i18n: column positioning for "hg log"
1186 # i18n: column positioning for "hg log"
1181 self.ui.write(_("manifest: %d:%s\n") %
1187 self.ui.write(_("manifest: %d:%s\n") %
1182 (self.repo.manifest.rev(mnode), hex(mnode)),
1188 (self.repo.manifest.rev(mnode), hex(mnode)),
1183 label='ui.debug log.manifest')
1189 label='ui.debug log.manifest')
1184 # i18n: column positioning for "hg log"
1190 # i18n: column positioning for "hg log"
1185 self.ui.write(_("user: %s\n") % ctx.user(),
1191 self.ui.write(_("user: %s\n") % ctx.user(),
1186 label='log.user')
1192 label='log.user')
1187 # i18n: column positioning for "hg log"
1193 # i18n: column positioning for "hg log"
1188 self.ui.write(_("date: %s\n") % date,
1194 self.ui.write(_("date: %s\n") % date,
1189 label='log.date')
1195 label='log.date')
1190
1196
1191 if self.ui.debugflag:
1197 if self.ui.debugflag:
1192 files = ctx.p1().status(ctx)[:3]
1198 files = ctx.p1().status(ctx)[:3]
1193 for key, value in zip([# i18n: column positioning for "hg log"
1199 for key, value in zip([# i18n: column positioning for "hg log"
1194 _("files:"),
1200 _("files:"),
1195 # i18n: column positioning for "hg log"
1201 # i18n: column positioning for "hg log"
1196 _("files+:"),
1202 _("files+:"),
1197 # i18n: column positioning for "hg log"
1203 # i18n: column positioning for "hg log"
1198 _("files-:")], files):
1204 _("files-:")], files):
1199 if value:
1205 if value:
1200 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1206 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1201 label='ui.debug log.files')
1207 label='ui.debug log.files')
1202 elif ctx.files() and self.ui.verbose:
1208 elif ctx.files() and self.ui.verbose:
1203 # i18n: column positioning for "hg log"
1209 # i18n: column positioning for "hg log"
1204 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1210 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1205 label='ui.note log.files')
1211 label='ui.note log.files')
1206 if copies and self.ui.verbose:
1212 if copies and self.ui.verbose:
1207 copies = ['%s (%s)' % c for c in copies]
1213 copies = ['%s (%s)' % c for c in copies]
1208 # i18n: column positioning for "hg log"
1214 # i18n: column positioning for "hg log"
1209 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1215 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1210 label='ui.note log.copies')
1216 label='ui.note log.copies')
1211
1217
1212 extra = ctx.extra()
1218 extra = ctx.extra()
1213 if extra and self.ui.debugflag:
1219 if extra and self.ui.debugflag:
1214 for key, value in sorted(extra.items()):
1220 for key, value in sorted(extra.items()):
1215 # i18n: column positioning for "hg log"
1221 # i18n: column positioning for "hg log"
1216 self.ui.write(_("extra: %s=%s\n")
1222 self.ui.write(_("extra: %s=%s\n")
1217 % (key, value.encode('string_escape')),
1223 % (key, value.encode('string_escape')),
1218 label='ui.debug log.extra')
1224 label='ui.debug log.extra')
1219
1225
1220 description = ctx.description().strip()
1226 description = ctx.description().strip()
1221 if description:
1227 if description:
1222 if self.ui.verbose:
1228 if self.ui.verbose:
1223 self.ui.write(_("description:\n"),
1229 self.ui.write(_("description:\n"),
1224 label='ui.note log.description')
1230 label='ui.note log.description')
1225 self.ui.write(description,
1231 self.ui.write(description,
1226 label='ui.note log.description')
1232 label='ui.note log.description')
1227 self.ui.write("\n\n")
1233 self.ui.write("\n\n")
1228 else:
1234 else:
1229 # i18n: column positioning for "hg log"
1235 # i18n: column positioning for "hg log"
1230 self.ui.write(_("summary: %s\n") %
1236 self.ui.write(_("summary: %s\n") %
1231 description.splitlines()[0],
1237 description.splitlines()[0],
1232 label='log.summary')
1238 label='log.summary')
1233 self.ui.write("\n")
1239 self.ui.write("\n")
1234
1240
1235 self.showpatch(changenode, matchfn)
1241 self.showpatch(changenode, matchfn)
1236
1242
1237 def showpatch(self, node, matchfn):
1243 def showpatch(self, node, matchfn):
1238 if not matchfn:
1244 if not matchfn:
1239 matchfn = self.matchfn
1245 matchfn = self.matchfn
1240 if matchfn:
1246 if matchfn:
1241 stat = self.diffopts.get('stat')
1247 stat = self.diffopts.get('stat')
1242 diff = self.diffopts.get('patch')
1248 diff = self.diffopts.get('patch')
1243 diffopts = patch.diffallopts(self.ui, self.diffopts)
1249 diffopts = patch.diffallopts(self.ui, self.diffopts)
1244 prev = self.repo.changelog.parents(node)[0]
1250 prev = self.repo.changelog.parents(node)[0]
1245 if stat:
1251 if stat:
1246 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1252 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1247 match=matchfn, stat=True)
1253 match=matchfn, stat=True)
1248 if diff:
1254 if diff:
1249 if stat:
1255 if stat:
1250 self.ui.write("\n")
1256 self.ui.write("\n")
1251 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1257 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1252 match=matchfn, stat=False)
1258 match=matchfn, stat=False)
1253 self.ui.write("\n")
1259 self.ui.write("\n")
1254
1260
1255 def _meaningful_parentrevs(self, ctx):
1261 def _meaningful_parentrevs(self, ctx):
1256 """Return list of meaningful (or all if debug) parentrevs for rev.
1262 """Return list of meaningful (or all if debug) parentrevs for rev.
1257
1263
1258 For merges (two non-nullrev revisions) both parents are meaningful.
1264 For merges (two non-nullrev revisions) both parents are meaningful.
1259 Otherwise the first parent revision is considered meaningful if it
1265 Otherwise the first parent revision is considered meaningful if it
1260 is not the preceding revision.
1266 is not the preceding revision.
1261 """
1267 """
1262 parents = ctx.parents()
1268 parents = ctx.parents()
1263 if len(parents) > 1:
1269 if len(parents) > 1:
1264 return parents
1270 return parents
1265 if self.ui.debugflag:
1271 if self.ui.debugflag:
1266 return [parents[0], self.repo['null']]
1272 return [parents[0], self.repo['null']]
1267 if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
1273 if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
1268 return []
1274 return []
1269 return parents
1275 return parents
1270
1276
1271 class jsonchangeset(changeset_printer):
1277 class jsonchangeset(changeset_printer):
1272 '''format changeset information.'''
1278 '''format changeset information.'''
1273
1279
1274 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1280 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1275 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1281 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1276 self.cache = {}
1282 self.cache = {}
1277 self._first = True
1283 self._first = True
1278
1284
1279 def close(self):
1285 def close(self):
1280 if not self._first:
1286 if not self._first:
1281 self.ui.write("\n]\n")
1287 self.ui.write("\n]\n")
1282 else:
1288 else:
1283 self.ui.write("[]\n")
1289 self.ui.write("[]\n")
1284
1290
1285 def _show(self, ctx, copies, matchfn, props):
1291 def _show(self, ctx, copies, matchfn, props):
1286 '''show a single changeset or file revision'''
1292 '''show a single changeset or file revision'''
1287 rev = ctx.rev()
1293 rev = ctx.rev()
1288 if rev is None:
1294 if rev is None:
1289 jrev = jnode = 'null'
1295 jrev = jnode = 'null'
1290 else:
1296 else:
1291 jrev = str(rev)
1297 jrev = str(rev)
1292 jnode = '"%s"' % hex(ctx.node())
1298 jnode = '"%s"' % hex(ctx.node())
1293 j = encoding.jsonescape
1299 j = encoding.jsonescape
1294
1300
1295 if self._first:
1301 if self._first:
1296 self.ui.write("[\n {")
1302 self.ui.write("[\n {")
1297 self._first = False
1303 self._first = False
1298 else:
1304 else:
1299 self.ui.write(",\n {")
1305 self.ui.write(",\n {")
1300
1306
1301 if self.ui.quiet:
1307 if self.ui.quiet:
1302 self.ui.write('\n "rev": %s' % jrev)
1308 self.ui.write('\n "rev": %s' % jrev)
1303 self.ui.write(',\n "node": %s' % jnode)
1309 self.ui.write(',\n "node": %s' % jnode)
1304 self.ui.write('\n }')
1310 self.ui.write('\n }')
1305 return
1311 return
1306
1312
1307 self.ui.write('\n "rev": %s' % jrev)
1313 self.ui.write('\n "rev": %s' % jrev)
1308 self.ui.write(',\n "node": %s' % jnode)
1314 self.ui.write(',\n "node": %s' % jnode)
1309 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1315 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1310 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1316 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1311 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1317 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1312 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1318 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1313 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1319 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1314
1320
1315 self.ui.write(',\n "bookmarks": [%s]' %
1321 self.ui.write(',\n "bookmarks": [%s]' %
1316 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1322 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1317 self.ui.write(',\n "tags": [%s]' %
1323 self.ui.write(',\n "tags": [%s]' %
1318 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1324 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1319 self.ui.write(',\n "parents": [%s]' %
1325 self.ui.write(',\n "parents": [%s]' %
1320 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1326 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1321
1327
1322 if self.ui.debugflag:
1328 if self.ui.debugflag:
1323 if rev is None:
1329 if rev is None:
1324 jmanifestnode = 'null'
1330 jmanifestnode = 'null'
1325 else:
1331 else:
1326 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1332 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1327 self.ui.write(',\n "manifest": %s' % jmanifestnode)
1333 self.ui.write(',\n "manifest": %s' % jmanifestnode)
1328
1334
1329 self.ui.write(',\n "extra": {%s}' %
1335 self.ui.write(',\n "extra": {%s}' %
1330 ", ".join('"%s": "%s"' % (j(k), j(v))
1336 ", ".join('"%s": "%s"' % (j(k), j(v))
1331 for k, v in ctx.extra().items()))
1337 for k, v in ctx.extra().items()))
1332
1338
1333 files = ctx.p1().status(ctx)
1339 files = ctx.p1().status(ctx)
1334 self.ui.write(',\n "modified": [%s]' %
1340 self.ui.write(',\n "modified": [%s]' %
1335 ", ".join('"%s"' % j(f) for f in files[0]))
1341 ", ".join('"%s"' % j(f) for f in files[0]))
1336 self.ui.write(',\n "added": [%s]' %
1342 self.ui.write(',\n "added": [%s]' %
1337 ", ".join('"%s"' % j(f) for f in files[1]))
1343 ", ".join('"%s"' % j(f) for f in files[1]))
1338 self.ui.write(',\n "removed": [%s]' %
1344 self.ui.write(',\n "removed": [%s]' %
1339 ", ".join('"%s"' % j(f) for f in files[2]))
1345 ", ".join('"%s"' % j(f) for f in files[2]))
1340
1346
1341 elif self.ui.verbose:
1347 elif self.ui.verbose:
1342 self.ui.write(',\n "files": [%s]' %
1348 self.ui.write(',\n "files": [%s]' %
1343 ", ".join('"%s"' % j(f) for f in ctx.files()))
1349 ", ".join('"%s"' % j(f) for f in ctx.files()))
1344
1350
1345 if copies:
1351 if copies:
1346 self.ui.write(',\n "copies": {%s}' %
1352 self.ui.write(',\n "copies": {%s}' %
1347 ", ".join('"%s": "%s"' % (j(k), j(v))
1353 ", ".join('"%s": "%s"' % (j(k), j(v))
1348 for k, v in copies))
1354 for k, v in copies))
1349
1355
1350 matchfn = self.matchfn
1356 matchfn = self.matchfn
1351 if matchfn:
1357 if matchfn:
1352 stat = self.diffopts.get('stat')
1358 stat = self.diffopts.get('stat')
1353 diff = self.diffopts.get('patch')
1359 diff = self.diffopts.get('patch')
1354 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1360 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1355 node, prev = ctx.node(), ctx.p1().node()
1361 node, prev = ctx.node(), ctx.p1().node()
1356 if stat:
1362 if stat:
1357 self.ui.pushbuffer()
1363 self.ui.pushbuffer()
1358 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1364 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1359 match=matchfn, stat=True)
1365 match=matchfn, stat=True)
1360 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1366 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1361 if diff:
1367 if diff:
1362 self.ui.pushbuffer()
1368 self.ui.pushbuffer()
1363 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1369 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1364 match=matchfn, stat=False)
1370 match=matchfn, stat=False)
1365 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1371 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1366
1372
1367 self.ui.write("\n }")
1373 self.ui.write("\n }")
1368
1374
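# Illustrative sketch (exposition only): jsonchangeset is what
# show_changeset() below returns for --template json; driven directly it
# emits one JSON object per shown changeset inside a list.
def _demo_jsonchangeset(ui, repo):
    displayer = jsonchangeset(ui, repo, None, {}, False)
    displayer.show(repo['tip'])
    displayer.close()   # closes the JSON list started by the first show()
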
1369 class changeset_templater(changeset_printer):
1375 class changeset_templater(changeset_printer):
1370 '''format changeset information.'''
1376 '''format changeset information.'''
1371
1377
1372 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1378 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1373 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1379 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1374 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1380 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1375 defaulttempl = {
1381 defaulttempl = {
1376 'parent': '{rev}:{node|formatnode} ',
1382 'parent': '{rev}:{node|formatnode} ',
1377 'manifest': '{rev}:{node|formatnode}',
1383 'manifest': '{rev}:{node|formatnode}',
1378 'file_copy': '{name} ({source})',
1384 'file_copy': '{name} ({source})',
1379 'extra': '{key}={value|stringescape}'
1385 'extra': '{key}={value|stringescape}'
1380 }
1386 }
1381 # filecopy is preserved for compatibility reasons
1387 # filecopy is preserved for compatibility reasons
1382 defaulttempl['filecopy'] = defaulttempl['file_copy']
1388 defaulttempl['filecopy'] = defaulttempl['file_copy']
1383 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1389 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1384 cache=defaulttempl)
1390 cache=defaulttempl)
1385 if tmpl:
1391 if tmpl:
1386 self.t.cache['changeset'] = tmpl
1392 self.t.cache['changeset'] = tmpl
1387
1393
1388 self.cache = {}
1394 self.cache = {}
1389
1395
1390 def _show(self, ctx, copies, matchfn, props):
1396 def _show(self, ctx, copies, matchfn, props):
1391 '''show a single changeset or file revision'''
1397 '''show a single changeset or file revision'''
1392
1398
1393 showlist = templatekw.showlist
1399 showlist = templatekw.showlist
1394
1400
1395 # showparents() behaviour depends on the ui trace level, which
1401 # showparents() behaviour depends on the ui trace level, which
1396 # causes unexpected behaviour at the templating level and makes
1402 # causes unexpected behaviour at the templating level and makes
1397 # it harder to extract into a standalone function. Its
1403 # it harder to extract into a standalone function. Its
1398 # behaviour cannot be changed, so leave it here for now.
1404 # behaviour cannot be changed, so leave it here for now.
1399 def showparents(**args):
1405 def showparents(**args):
1400 ctx = args['ctx']
1406 ctx = args['ctx']
1401 parents = [[('rev', p.rev()),
1407 parents = [[('rev', p.rev()),
1402 ('node', p.hex()),
1408 ('node', p.hex()),
1403 ('phase', p.phasestr())]
1409 ('phase', p.phasestr())]
1404 for p in self._meaningful_parentrevs(ctx)]
1410 for p in self._meaningful_parentrevs(ctx)]
1405 return showlist('parent', parents, **args)
1411 return showlist('parent', parents, **args)
1406
1412
1407 props = props.copy()
1413 props = props.copy()
1408 props.update(templatekw.keywords)
1414 props.update(templatekw.keywords)
1409 props['parents'] = showparents
1415 props['parents'] = showparents
1410 props['templ'] = self.t
1416 props['templ'] = self.t
1411 props['ctx'] = ctx
1417 props['ctx'] = ctx
1412 props['repo'] = self.repo
1418 props['repo'] = self.repo
1413 props['revcache'] = {'copies': copies}
1419 props['revcache'] = {'copies': copies}
1414 props['cache'] = self.cache
1420 props['cache'] = self.cache
1415
1421
1416 # find correct templates for current mode
1422 # find correct templates for current mode
1417
1423
1418 tmplmodes = [
1424 tmplmodes = [
1419 (True, None),
1425 (True, None),
1420 (self.ui.verbose, 'verbose'),
1426 (self.ui.verbose, 'verbose'),
1421 (self.ui.quiet, 'quiet'),
1427 (self.ui.quiet, 'quiet'),
1422 (self.ui.debugflag, 'debug'),
1428 (self.ui.debugflag, 'debug'),
1423 ]
1429 ]
1424
1430
1425 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1431 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1426 for mode, postfix in tmplmodes:
1432 for mode, postfix in tmplmodes:
1427 for type in types:
1433 for type in types:
1428 cur = postfix and ('%s_%s' % (type, postfix)) or type
1434 cur = postfix and ('%s_%s' % (type, postfix)) or type
1429 if mode and cur in self.t:
1435 if mode and cur in self.t:
1430 types[type] = cur
1436 types[type] = cur
1431
1437
1432 try:
1438 try:
1433
1439
1434 # write header
1440 # write header
1435 if types['header']:
1441 if types['header']:
1436 h = templater.stringify(self.t(types['header'], **props))
1442 h = templater.stringify(self.t(types['header'], **props))
1437 if self.buffered:
1443 if self.buffered:
1438 self.header[ctx.rev()] = h
1444 self.header[ctx.rev()] = h
1439 else:
1445 else:
1440 if self.lastheader != h:
1446 if self.lastheader != h:
1441 self.lastheader = h
1447 self.lastheader = h
1442 self.ui.write(h)
1448 self.ui.write(h)
1443
1449
1444 # write changeset metadata, then patch if requested
1450 # write changeset metadata, then patch if requested
1445 key = types['changeset']
1451 key = types['changeset']
1446 self.ui.write(templater.stringify(self.t(key, **props)))
1452 self.ui.write(templater.stringify(self.t(key, **props)))
1447 self.showpatch(ctx.node(), matchfn)
1453 self.showpatch(ctx.node(), matchfn)
1448
1454
1449 if types['footer']:
1455 if types['footer']:
1450 if not self.footer:
1456 if not self.footer:
1451 self.footer = templater.stringify(self.t(types['footer'],
1457 self.footer = templater.stringify(self.t(types['footer'],
1452 **props))
1458 **props))
1453
1459
1454 except KeyError, inst:
1460 except KeyError, inst:
1455 msg = _("%s: no key named '%s'")
1461 msg = _("%s: no key named '%s'")
1456 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1462 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1457 except SyntaxError, inst:
1463 except SyntaxError, inst:
1458 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1464 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1459
1465
1460 def gettemplate(ui, tmpl, style):
1466 def gettemplate(ui, tmpl, style):
1461 """
1467 """
1462 Find the template matching the given template spec or style.
1468 Find the template matching the given template spec or style.
1463 """
1469 """
1464
1470
1465 # ui settings
1471 # ui settings
1466 if not tmpl and not style: # templates are stronger than style
1472 if not tmpl and not style: # templates are stronger than style
1467 tmpl = ui.config('ui', 'logtemplate')
1473 tmpl = ui.config('ui', 'logtemplate')
1468 if tmpl:
1474 if tmpl:
1469 try:
1475 try:
1470 tmpl = templater.unquotestring(tmpl)
1476 tmpl = templater.unquotestring(tmpl)
1471 except SyntaxError:
1477 except SyntaxError:
1472 pass
1478 pass
1473 return tmpl, None
1479 return tmpl, None
1474 else:
1480 else:
1475 style = util.expandpath(ui.config('ui', 'style', ''))
1481 style = util.expandpath(ui.config('ui', 'style', ''))
1476
1482
1477 if not tmpl and style:
1483 if not tmpl and style:
1478 mapfile = style
1484 mapfile = style
1479 if not os.path.split(mapfile)[0]:
1485 if not os.path.split(mapfile)[0]:
1480 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1486 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1481 or templater.templatepath(mapfile))
1487 or templater.templatepath(mapfile))
1482 if mapname:
1488 if mapname:
1483 mapfile = mapname
1489 mapfile = mapname
1484 return None, mapfile
1490 return None, mapfile
1485
1491
1486 if not tmpl:
1492 if not tmpl:
1487 return None, None
1493 return None, None
1488
1494
1489 # looks like a literal template?
1495 # looks like a literal template?
1490 if '{' in tmpl:
1496 if '{' in tmpl:
1491 return tmpl, None
1497 return tmpl, None
1492
1498
1493 # perhaps a stock style?
1499 # perhaps a stock style?
1494 if not os.path.split(tmpl)[0]:
1500 if not os.path.split(tmpl)[0]:
1495 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1501 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1496 or templater.templatepath(tmpl))
1502 or templater.templatepath(tmpl))
1497 if mapname and os.path.isfile(mapname):
1503 if mapname and os.path.isfile(mapname):
1498 return None, mapname
1504 return None, mapname
1499
1505
1500 # perhaps it's a reference to [templates]
1506 # perhaps it's a reference to [templates]
1501 t = ui.config('templates', tmpl)
1507 t = ui.config('templates', tmpl)
1502 if t:
1508 if t:
1503 try:
1509 try:
1504 tmpl = templater.unquotestring(t)
1510 tmpl = templater.unquotestring(t)
1505 except SyntaxError:
1511 except SyntaxError:
1506 tmpl = t
1512 tmpl = t
1507 return tmpl, None
1513 return tmpl, None
1508
1514
1509 if tmpl == 'list':
1515 if tmpl == 'list':
1510 ui.write(_("available styles: %s\n") % templater.stylelist())
1516 ui.write(_("available styles: %s\n") % templater.stylelist())
1511 raise util.Abort(_("specify a template"))
1517 raise util.Abort(_("specify a template"))
1512
1518
1513 # perhaps it's a path to a map or a template
1519 # perhaps it's a path to a map or a template
1514 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1520 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1515 # is it a mapfile for a style?
1521 # is it a mapfile for a style?
1516 if os.path.basename(tmpl).startswith("map-"):
1522 if os.path.basename(tmpl).startswith("map-"):
1517 return None, os.path.realpath(tmpl)
1523 return None, os.path.realpath(tmpl)
1518 tmpl = open(tmpl).read()
1524 tmpl = open(tmpl).read()
1519 return tmpl, None
1525 return tmpl, None
1520
1526
1521 # constant string?
1527 # constant string?
1522 return tmpl, None
1528 return tmpl, None
1523
1529
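# Illustrative sketch (exposition only): how gettemplate() above resolves the
# two main kinds of spec.  A literal template (anything containing '{') is
# returned as-is; a stock style name comes back as a map file path for the
# templater, assuming Mercurial's bundled templates are installed.
def _demo_gettemplate(ui):
    tmpl, mapfile = gettemplate(ui, '{rev}:{node|short}\n', None)
    # tmpl == '{rev}:{node|short}\n' and mapfile is None here
    tmpl2, mapfile2 = gettemplate(ui, None, 'default')
    # tmpl2 is None and mapfile2 points at the map-cmdline.default style file
    return (tmpl, mapfile), (tmpl2, mapfile2)
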
1524 def show_changeset(ui, repo, opts, buffered=False):
1530 def show_changeset(ui, repo, opts, buffered=False):
1525 """show one changeset using template or regular display.
1531 """show one changeset using template or regular display.
1526
1532
1527 Display format will be the first non-empty hit of:
1533 Display format will be the first non-empty hit of:
1528 1. option 'template'
1534 1. option 'template'
1529 2. option 'style'
1535 2. option 'style'
1530 3. [ui] setting 'logtemplate'
1536 3. [ui] setting 'logtemplate'
1531 4. [ui] setting 'style'
1537 4. [ui] setting 'style'
1532 If all of these values are either unset or the empty string,
1538 If all of these values are either unset or the empty string,
1533 regular display via changeset_printer() is done.
1539 regular display via changeset_printer() is done.
1534 """
1540 """
1535 # options
1541 # options
1536 matchfn = None
1542 matchfn = None
1537 if opts.get('patch') or opts.get('stat'):
1543 if opts.get('patch') or opts.get('stat'):
1538 matchfn = scmutil.matchall(repo)
1544 matchfn = scmutil.matchall(repo)
1539
1545
1540 if opts.get('template') == 'json':
1546 if opts.get('template') == 'json':
1541 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1547 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1542
1548
1543 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1549 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1544
1550
1545 if not tmpl and not mapfile:
1551 if not tmpl and not mapfile:
1546 return changeset_printer(ui, repo, matchfn, opts, buffered)
1552 return changeset_printer(ui, repo, matchfn, opts, buffered)
1547
1553
1548 try:
1554 try:
1549 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1555 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1550 buffered)
1556 buffered)
1551 except SyntaxError, inst:
1557 except SyntaxError, inst:
1552 raise util.Abort(inst.args[0])
1558 raise util.Abort(inst.args[0])
1553 return t
1559 return t
1554
1560
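# Illustrative sketch (exposition only, assuming `ui`, `repo` and a command
# opts dict): the usual displayer loop built on show_changeset() above.  An
# empty opts dict falls back to the [ui] logtemplate/style settings or plain
# changeset_printer output.
def _demo_show_changeset(ui, repo):
    displayer = show_changeset(ui, repo, {'template': '{rev}:{desc|firstline}\n'})
    for rev in repo:
        displayer.show(repo[rev])
    displayer.close()
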
1555 def showmarker(ui, marker):
1561 def showmarker(ui, marker):
1556 """utility function to display obsolescence marker in a readable way
1562 """utility function to display obsolescence marker in a readable way
1557
1563
1558 To be used by debug function."""
1564 To be used by debug function."""
1559 ui.write(hex(marker.precnode()))
1565 ui.write(hex(marker.precnode()))
1560 for repl in marker.succnodes():
1566 for repl in marker.succnodes():
1561 ui.write(' ')
1567 ui.write(' ')
1562 ui.write(hex(repl))
1568 ui.write(hex(repl))
1563 ui.write(' %X ' % marker.flags())
1569 ui.write(' %X ' % marker.flags())
1564 parents = marker.parentnodes()
1570 parents = marker.parentnodes()
1565 if parents is not None:
1571 if parents is not None:
1566 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1572 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1567 ui.write('(%s) ' % util.datestr(marker.date()))
1573 ui.write('(%s) ' % util.datestr(marker.date()))
1568 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1574 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1569 sorted(marker.metadata().items())
1575 sorted(marker.metadata().items())
1570 if t[0] != 'date')))
1576 if t[0] != 'date')))
1571 ui.write('\n')
1577 ui.write('\n')
1572
1578
1573 def finddate(ui, repo, date):
1579 def finddate(ui, repo, date):
1574 """Find the tipmost changeset that matches the given date spec"""
1580 """Find the tipmost changeset that matches the given date spec"""
1575
1581
1576 df = util.matchdate(date)
1582 df = util.matchdate(date)
1577 m = scmutil.matchall(repo)
1583 m = scmutil.matchall(repo)
1578 results = {}
1584 results = {}
1579
1585
1580 def prep(ctx, fns):
1586 def prep(ctx, fns):
1581 d = ctx.date()
1587 d = ctx.date()
1582 if df(d[0]):
1588 if df(d[0]):
1583 results[ctx.rev()] = d
1589 results[ctx.rev()] = d
1584
1590
1585 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1591 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1586 rev = ctx.rev()
1592 rev = ctx.rev()
1587 if rev in results:
1593 if rev in results:
1588 ui.status(_("found revision %s from %s\n") %
1594 ui.status(_("found revision %s from %s\n") %
1589 (rev, util.datestr(results[rev])))
1595 (rev, util.datestr(results[rev])))
1590 return str(rev)
1596 return str(rev)
1591
1597
1592 raise util.Abort(_("revision matching date not found"))
1598 raise util.Abort(_("revision matching date not found"))
1593
1599
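# Illustrative sketch (exposition only): finddate() above returns the matching
# revision number as a string (or aborts), so callers can feed it straight
# back into a changectx lookup.  The date spec uses the usual util.matchdate
# syntax, e.g. '>2010-01-01'.
def _demo_finddate(ui, repo):
    rev = finddate(ui, repo, '>2010-01-01')
    return repo[rev]
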
1594 def increasingwindows(windowsize=8, sizelimit=512):
1600 def increasingwindows(windowsize=8, sizelimit=512):
1595 while True:
1601 while True:
1596 yield windowsize
1602 yield windowsize
1597 if windowsize < sizelimit:
1603 if windowsize < sizelimit:
1598 windowsize *= 2
1604 windowsize *= 2
1599
1605
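# Illustrative sketch (exposition only): how the generator above behaves with
# its defaults -- the window size doubles from 8 until it reaches the 512 cap
# and then stays there.
def _demo_increasingwindows():
    sizes = []
    for size in increasingwindows():
        sizes.append(size)
        if len(sizes) == 8:
            break
    return sizes   # [8, 16, 32, 64, 128, 256, 512, 512]
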
1600 class FileWalkError(Exception):
1606 class FileWalkError(Exception):
1601 pass
1607 pass
1602
1608
1603 def walkfilerevs(repo, match, follow, revs, fncache):
1609 def walkfilerevs(repo, match, follow, revs, fncache):
1604 '''Walks the file history for the matched files.
1610 '''Walks the file history for the matched files.
1605
1611
1606 Returns the changeset revs that are involved in the file history.
1612 Returns the changeset revs that are involved in the file history.
1607
1613
1608 Raises FileWalkError if the file history can't be walked using
1614 Raises FileWalkError if the file history can't be walked using
1609 filelogs alone.
1615 filelogs alone.
1610 '''
1616 '''
1611 wanted = set()
1617 wanted = set()
1612 copies = []
1618 copies = []
1613 minrev, maxrev = min(revs), max(revs)
1619 minrev, maxrev = min(revs), max(revs)
1614 def filerevgen(filelog, last):
1620 def filerevgen(filelog, last):
1615 """
1621 """
1616 Only files, no patterns. Check the history of each file.
1622 Only files, no patterns. Check the history of each file.
1617
1623
1618 Examines filelog entries within the minrev..maxrev linkrev range
1624 Examines filelog entries within the minrev..maxrev linkrev range
1619 and returns an iterator yielding (linkrev, parentlinkrevs, copied)
1625 and returns an iterator yielding (linkrev, parentlinkrevs, copied)
1620 tuples in backwards order.
1626 tuples in backwards order.
1621 """
1627 """
1622 cl_count = len(repo)
1628 cl_count = len(repo)
1623 revs = []
1629 revs = []
1624 for j in xrange(0, last + 1):
1630 for j in xrange(0, last + 1):
1625 linkrev = filelog.linkrev(j)
1631 linkrev = filelog.linkrev(j)
1626 if linkrev < minrev:
1632 if linkrev < minrev:
1627 continue
1633 continue
1628 # only yield revs for which we have the changelog; it can
1634 # only yield revs for which we have the changelog; it can
1629 # happen while doing "hg log" during a pull or commit
1635 # happen while doing "hg log" during a pull or commit
1630 if linkrev >= cl_count:
1636 if linkrev >= cl_count:
1631 break
1637 break
1632
1638
1633 parentlinkrevs = []
1639 parentlinkrevs = []
1634 for p in filelog.parentrevs(j):
1640 for p in filelog.parentrevs(j):
1635 if p != nullrev:
1641 if p != nullrev:
1636 parentlinkrevs.append(filelog.linkrev(p))
1642 parentlinkrevs.append(filelog.linkrev(p))
1637 n = filelog.node(j)
1643 n = filelog.node(j)
1638 revs.append((linkrev, parentlinkrevs,
1644 revs.append((linkrev, parentlinkrevs,
1639 follow and filelog.renamed(n)))
1645 follow and filelog.renamed(n)))
1640
1646
1641 return reversed(revs)
1647 return reversed(revs)
1642 def iterfiles():
1648 def iterfiles():
1643 pctx = repo['.']
1649 pctx = repo['.']
1644 for filename in match.files():
1650 for filename in match.files():
1645 if follow:
1651 if follow:
1646 if filename not in pctx:
1652 if filename not in pctx:
1647 raise util.Abort(_('cannot follow file not in parent '
1653 raise util.Abort(_('cannot follow file not in parent '
1648 'revision: "%s"') % filename)
1654 'revision: "%s"') % filename)
1649 yield filename, pctx[filename].filenode()
1655 yield filename, pctx[filename].filenode()
1650 else:
1656 else:
1651 yield filename, None
1657 yield filename, None
1652 for filename_node in copies:
1658 for filename_node in copies:
1653 yield filename_node
1659 yield filename_node
1654
1660
1655 for file_, node in iterfiles():
1661 for file_, node in iterfiles():
1656 filelog = repo.file(file_)
1662 filelog = repo.file(file_)
1657 if not len(filelog):
1663 if not len(filelog):
1658 if node is None:
1664 if node is None:
1659 # A zero count may be a directory or deleted file, so
1665 # A zero count may be a directory or deleted file, so
1660 # try to find matching entries on the slow path.
1666 # try to find matching entries on the slow path.
1661 if follow:
1667 if follow:
1662 raise util.Abort(
1668 raise util.Abort(
1663 _('cannot follow nonexistent file: "%s"') % file_)
1669 _('cannot follow nonexistent file: "%s"') % file_)
1664 raise FileWalkError("Cannot walk via filelog")
1670 raise FileWalkError("Cannot walk via filelog")
1665 else:
1671 else:
1666 continue
1672 continue
1667
1673
1668 if node is None:
1674 if node is None:
1669 last = len(filelog) - 1
1675 last = len(filelog) - 1
1670 else:
1676 else:
1671 last = filelog.rev(node)
1677 last = filelog.rev(node)
1672
1678
1673 # keep track of all ancestors of the file
1679 # keep track of all ancestors of the file
1674 ancestors = set([filelog.linkrev(last)])
1680 ancestors = set([filelog.linkrev(last)])
1675
1681
1676 # iterate from latest to oldest revision
1682 # iterate from latest to oldest revision
1677 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1683 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1678 if not follow:
1684 if not follow:
1679 if rev > maxrev:
1685 if rev > maxrev:
1680 continue
1686 continue
1681 else:
1687 else:
1682 # Note that last might not be the first interesting
1688 # Note that last might not be the first interesting
1683 # rev to us:
1689 # rev to us:
1684 # if the file has been changed after maxrev, we'll
1690 # if the file has been changed after maxrev, we'll
1685 # have linkrev(last) > maxrev, and we still need
1691 # have linkrev(last) > maxrev, and we still need
1686 # to explore the file graph
1692 # to explore the file graph
1687 if rev not in ancestors:
1693 if rev not in ancestors:
1688 continue
1694 continue
1689 # XXX insert 1327 fix here
1695 # XXX insert 1327 fix here
1690 if flparentlinkrevs:
1696 if flparentlinkrevs:
1691 ancestors.update(flparentlinkrevs)
1697 ancestors.update(flparentlinkrevs)
1692
1698
1693 fncache.setdefault(rev, []).append(file_)
1699 fncache.setdefault(rev, []).append(file_)
1694 wanted.add(rev)
1700 wanted.add(rev)
1695 if copied:
1701 if copied:
1696 copies.append(copied)
1702 copies.append(copied)
1697
1703
1698 return wanted
1704 return wanted
1699
1705
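# Illustrative sketch (exposition only, 'README' is a stand-in file name):
# walkfilerevs() above is the filelog fast path used by walkchangerevs()
# below.  It fills `fncache`, returns the linkrevs touching the matched files,
# and raises FileWalkError when only the slow changelog walk can answer.
def _demo_walkfilerevs(repo):
    fncache = {}
    m = scmutil.matchfiles(repo, ['README'])
    wanted = walkfilerevs(repo, m, False, list(repo), fncache)
    return sorted(wanted)
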
1700 class _followfilter(object):
1706 class _followfilter(object):
1701 def __init__(self, repo, onlyfirst=False):
1707 def __init__(self, repo, onlyfirst=False):
1702 self.repo = repo
1708 self.repo = repo
1703 self.startrev = nullrev
1709 self.startrev = nullrev
1704 self.roots = set()
1710 self.roots = set()
1705 self.onlyfirst = onlyfirst
1711 self.onlyfirst = onlyfirst
1706
1712
1707 def match(self, rev):
1713 def match(self, rev):
1708 def realparents(rev):
1714 def realparents(rev):
1709 if self.onlyfirst:
1715 if self.onlyfirst:
1710 return self.repo.changelog.parentrevs(rev)[0:1]
1716 return self.repo.changelog.parentrevs(rev)[0:1]
1711 else:
1717 else:
1712 return filter(lambda x: x != nullrev,
1718 return filter(lambda x: x != nullrev,
1713 self.repo.changelog.parentrevs(rev))
1719 self.repo.changelog.parentrevs(rev))
1714
1720
1715 if self.startrev == nullrev:
1721 if self.startrev == nullrev:
1716 self.startrev = rev
1722 self.startrev = rev
1717 return True
1723 return True
1718
1724
1719 if rev > self.startrev:
1725 if rev > self.startrev:
1720 # forward: all descendants
1726 # forward: all descendants
1721 if not self.roots:
1727 if not self.roots:
1722 self.roots.add(self.startrev)
1728 self.roots.add(self.startrev)
1723 for parent in realparents(rev):
1729 for parent in realparents(rev):
1724 if parent in self.roots:
1730 if parent in self.roots:
1725 self.roots.add(rev)
1731 self.roots.add(rev)
1726 return True
1732 return True
1727 else:
1733 else:
1728 # backwards: all parents
1734 # backwards: all parents
1729 if not self.roots:
1735 if not self.roots:
1730 self.roots.update(realparents(self.startrev))
1736 self.roots.update(realparents(self.startrev))
1731 if rev in self.roots:
1737 if rev in self.roots:
1732 self.roots.remove(rev)
1738 self.roots.remove(rev)
1733 self.roots.update(realparents(rev))
1739 self.roots.update(realparents(rev))
1734 return True
1740 return True
1735
1741
1736 return False
1742 return False
1737
1743
1738 def walkchangerevs(repo, match, opts, prepare):
1744 def walkchangerevs(repo, match, opts, prepare):
1739 '''Iterate over files and the revs in which they changed.
1745 '''Iterate over files and the revs in which they changed.
1740
1746
1741 Callers most commonly need to iterate backwards over the history
1747 Callers most commonly need to iterate backwards over the history
1742 in which they are interested. Doing so has awful (quadratic-looking)
1748 in which they are interested. Doing so has awful (quadratic-looking)
1743 performance, so we use iterators in a "windowed" way.
1749 performance, so we use iterators in a "windowed" way.
1744
1750
1745 We walk a window of revisions in the desired order. Within the
1751 We walk a window of revisions in the desired order. Within the
1746 window, we first walk forwards to gather data, then in the desired
1752 window, we first walk forwards to gather data, then in the desired
1747 order (usually backwards) to display it.
1753 order (usually backwards) to display it.
1748
1754
1749 This function returns an iterator yielding contexts. Before
1755 This function returns an iterator yielding contexts. Before
1750 yielding each context, the iterator will first call the prepare
1756 yielding each context, the iterator will first call the prepare
1751 function on each context in the window in forward order.'''
1757 function on each context in the window in forward order.'''
1752
1758
1753 follow = opts.get('follow') or opts.get('follow_first')
1759 follow = opts.get('follow') or opts.get('follow_first')
1754 revs = _logrevs(repo, opts)
1760 revs = _logrevs(repo, opts)
1755 if not revs:
1761 if not revs:
1756 return []
1762 return []
1757 wanted = set()
1763 wanted = set()
1758 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1764 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
1759 opts.get('removed'))
1765 opts.get('removed'))
1760 fncache = {}
1766 fncache = {}
1761 change = repo.changectx
1767 change = repo.changectx
1762
1768
1763 # First step is to fill wanted, the set of revisions that we want to yield.
1769 # First step is to fill wanted, the set of revisions that we want to yield.
1764 # When it does not induce extra cost, we also fill fncache for revisions in
1770 # When it does not induce extra cost, we also fill fncache for revisions in
1765 # wanted: a cache of filenames that were changed (ctx.files()) and that
1771 # wanted: a cache of filenames that were changed (ctx.files()) and that
1766 # match the file filtering conditions.
1772 # match the file filtering conditions.
1767
1773
1768 if match.always():
1774 if match.always():
1769 # No files, no patterns. Display all revs.
1775 # No files, no patterns. Display all revs.
1770 wanted = revs
1776 wanted = revs
1771 elif not slowpath:
1777 elif not slowpath:
1772 # We only have to read through the filelog to find wanted revisions
1778 # We only have to read through the filelog to find wanted revisions
1773
1779
1774 try:
1780 try:
1775 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1781 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1776 except FileWalkError:
1782 except FileWalkError:
1777 slowpath = True
1783 slowpath = True
1778
1784
1779 # We decided to fall back to the slowpath because at least one
1785 # We decided to fall back to the slowpath because at least one
1780 # of the paths was not a file. Check to see if at least one of them
1786 # of the paths was not a file. Check to see if at least one of them
1781 # existed in history, otherwise simply return
1787 # existed in history, otherwise simply return
1782 for path in match.files():
1788 for path in match.files():
1783 if path == '.' or path in repo.store:
1789 if path == '.' or path in repo.store:
1784 break
1790 break
1785 else:
1791 else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()

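# The generator returned above is walkchangerevs()'s public interface: it
# yields changectx objects in windowed batches, calling the caller-supplied
# "prepare" callback with (ctx, fns) before each window is re-yielded. A
# rough consumption sketch (the prepare body is illustrative only):
#
#   def prepare(ctx, fns):
#       pass  # e.g. prime caches needed to display ctx
#
#   for ctx in walkchangerevs(repo, match, opts, prepare):
#       displayer.show(ctx)
#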
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating linkrevs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    fcacheready = [False]
    pctx = repo['.']

    def populate():
        for fn in files:
            for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
                for c in i:
                    fcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        if not fcacheready[0]:
            # Lazy initialization
            fcacheready[0] = True
            populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher

def _makenofollowlogfilematcher(repo, pats, opts):
    '''hook for extensions to override the filematcher for non-follow cases'''
    return None

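# _makelogrevset() below translates parsed log options into one revset string
# via the opt2revset table: each active option becomes a revset predicate,
# list-valued options are joined with their per-option connective (' or ' for
# user/keyword/branch, ' and ' for prune), and the pieces are and-ed together.
# For example, "hg log -k bug -u alice --no-merges" yields approximately:
#
#   (keyword('bug') and not merge() and user('alice'))
#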
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match object filtering
    the files to be detailed when displaying the revision.
    """
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
        }

    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher

def _logrevs(repo, opts):
    # Default --rev value depends on --follow but --follow behaviour
    # depends on revisions resolved from --rev...
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        revs = scmutil.revrange(repo, opts['rev'])
    elif follow and repo.dirstate.p1() == nullid:
        revs = revset.baseset()
    elif follow:
        revs = repo.revs('reverse(:.)')
    else:
        revs = revset.spanset(repo)
        revs.reverse()
    return revs

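# Default revision ranges chosen by _logrevs() above:
#   --rev given                     -> exactly the user-specified revset
#   --follow with null dirstate p1  -> empty baseset (nothing to follow)
#   --follow                        -> reverse(:.), ancestors of the parent
#   otherwise                       -> all revisions, newest first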
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match object
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically returns
        # the revision matching A then the revision matching B. Sort
        # again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        limitedrevs = []
        for idx, r in enumerate(revs):
            if limit <= idx:
                break
            limitedrevs.append(r)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher

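# getlogrevs()/getgraphlogrevs() above are consumed roughly as follows; this
# is a sketch only, where "displayer" stands for the changeset displayer
# returned by show_changeset() elsewhere in this module:
#
#   revs, expr, filematcher = getlogrevs(repo, pats, opts)
#   for rev in revs:
#       ctx = repo[rev]
#       matchfn = filematcher(rev) if filematcher else None
#       displayer.show(ctx, matchfn=matchfn)
#   displayer.close()
#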
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()

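# Node symbols drawn by displaygraph() above: 'o' for ordinary changesets,
# '@' for working-directory parents, 'x' for obsolete changesets and '_' for
# changesets that close their branch.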
def graphlog(ui, repo, *pats, **opts):
    # Parameters are identical to log command ones
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        endrev = None
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    showparents = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, revdag, displayer, showparents,
                 graphmod.asciiedges, getrenamed, filematcher)

def checkunsupportedgraphflags(pats, opts):
    for op in ["newest_first"]:
        if op in opts and opts[op]:
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % op.replace("_", "-"))

def graphrevs(repo, nodes, opts):
    limit = loglimit(opts)
    nodes.reverse()
    if limit is not None:
        nodes = nodes[:limit]
    return graphmod.nodes(repo, nodes)

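# add() below returns the list of files that could not be added ("bad"
# files); callers typically map a non-empty list to a non-zero exit code.
# Subrepositories are walked as well, recursing with explicitonly=True
# unless --subrepos was requested.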
def add(ui, repo, match, prefix, explicitonly, **opts):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def forget(ui, repo, match, prefix, explicitonly):
    join = lambda f: os.path.join(prefix, f)
    bad = []
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

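# files() below emits one formatter item per matched file and returns 0 if
# anything was listed, 1 otherwise, following the usual hg exit-code
# convention.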
def files(ui, ctx, m, fm, fmt, subrepos):
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        def matchessubrepo(subpath):
            return (m.always() or m.exact(subpath)
                    or any(f.startswith(subpath + '/') for f in m.files()))

        if subrepos or matchessubrepo(subpath):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt, subrepos) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret

def remove(ui, repo, m, prefix, after, force, subrepos):
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret

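# Which status buckets remove() above actually schedules for removal:
#   --force : modified + deleted + clean + added
#   --after : deleted only (other categories warn "file still exists")
#   default : deleted + clean (modified/added files only produce warnings)
# Files marked for add are never unlinked from disk, only forgotten.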
def cat(ui, repo, ctx, matcher, prefix, **opts):
    err = 1

    def write(path):
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err

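# commit() below is a thin helper: it normalizes --date, resolves the commit
# message, optionally runs addremove, and delegates the actual commit to the
# commitfunc callable supplied by the caller. A sketch of the expected shape
# (illustrative only):
#
#   def commitfunc(ui, repo, message, match, opts):
#       return repo.commit(message, opts.get('user'), opts.get('date'), match)
#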
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        if scmutil.addremove(repo, matcher, "", opts) != 0:
            raise util.Abort(
                _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)

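# amend() below works in two steps under wlock/lock/transaction:
#   1. commit any working-directory changes as a temporary "amend commit" on
#      top of the changeset being amended, with hooks and the active bookmark
#      suppressed;
#   2. build a memctx replaying the old changeset plus those changes onto the
#      old changeset's parent, commit it, and then either obsolete or strip
#      the replaced changesets depending on whether createmarkers is enabled.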
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = dsguard = lock = newid = None
    try:
        wlock = repo.wlock()
        dsguard = dirstateguard(repo, 'amend')
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            activebookmark = repo._activebookmark
            try:
                repo._activebookmark = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._activebookmark = activebookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                if old.p2:
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

            user = opts.get('user') or old.user()
            date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
            dsguard.close()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        lockmod.release(lock, dsguard, wlock)
    return newid

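# commiteditor() below is the default editor callback: an existing changeset
# description is returned untouched, and only an empty description falls
# through to commitforceeditor() and hence to an interactive editor.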
def commiteditor(repo, ctx, subs, editform=''):
    if ctx.description():
        return ctx.description()
    return commitforceeditor(repo, ctx, subs, editform=editform)

2679 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2685 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2680 editform=''):
2686 editform=''):
2681 if not extramsg:
2687 if not extramsg:
2682 extramsg = _("Leave message empty to abort commit.")
2688 extramsg = _("Leave message empty to abort commit.")
2683
2689
2684 forms = [e for e in editform.split('.') if e]
2690 forms = [e for e in editform.split('.') if e]
2685 forms.insert(0, 'changeset')
2691 forms.insert(0, 'changeset')
2686 while forms:
2692 while forms:
2687 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2693 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2688 if tmpl:
2694 if tmpl:
2689 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2695 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2690 break
2696 break
2691 forms.pop()
2697 forms.pop()
2692 else:
2698 else:
2693 committext = buildcommittext(repo, ctx, subs, extramsg)
2699 committext = buildcommittext(repo, ctx, subs, extramsg)
2694
2700
2695 # run editor in the repository root
2701 # run editor in the repository root
2696 olddir = os.getcwd()
2702 olddir = os.getcwd()
2697 os.chdir(repo.root)
2703 os.chdir(repo.root)
2698 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2704 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2699 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2705 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2700 os.chdir(olddir)
2706 os.chdir(olddir)
2701
2707
2702 if finishdesc:
2708 if finishdesc:
2703 text = finishdesc(text)
2709 text = finishdesc(text)
2704 if not text.strip():
2710 if not text.strip():
2705 raise util.Abort(_("empty commit message"))
2711 raise util.Abort(_("empty commit message"))
2706
2712
2707 return text
2713 return text
2708
2714
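The while/else lookup above probes the [committemplate] section from the most specific dotted editform down to the bare 'changeset' key before falling back to buildcommittext. A small standalone sketch of the key order it tries, with a hypothetical editform value:

def committemplatekeys(editform):
    # mirrors the loop in commitforceeditor above
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    keys = []
    while forms:
        keys.append('.'.join(forms))
        forms.pop()
    return keys

# committemplatekeys('commit.amend.normal') ==
# ['changeset.commit.amend.normal', 'changeset.commit.amend',
#  'changeset.commit', 'changeset']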
2709 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2715 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2710 ui = repo.ui
2716 ui = repo.ui
2711 tmpl, mapfile = gettemplate(ui, tmpl, None)
2717 tmpl, mapfile = gettemplate(ui, tmpl, None)
2712
2718
2713 try:
2719 try:
2714 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2720 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2715 except SyntaxError, inst:
2721 except SyntaxError, inst:
2716 raise util.Abort(inst.args[0])
2722 raise util.Abort(inst.args[0])
2717
2723
2718 for k, v in repo.ui.configitems('committemplate'):
2724 for k, v in repo.ui.configitems('committemplate'):
2719 if k != 'changeset':
2725 if k != 'changeset':
2720 t.t.cache[k] = v
2726 t.t.cache[k] = v
2721
2727
2722 if not extramsg:
2728 if not extramsg:
2723 extramsg = '' # ensure that extramsg is string
2729 extramsg = '' # ensure that extramsg is string
2724
2730
2725 ui.pushbuffer()
2731 ui.pushbuffer()
2726 t.show(ctx, extramsg=extramsg)
2732 t.show(ctx, extramsg=extramsg)
2727 return ui.popbuffer()
2733 return ui.popbuffer()
2728
2734
2729 def buildcommittext(repo, ctx, subs, extramsg):
2735 def buildcommittext(repo, ctx, subs, extramsg):
2730 edittext = []
2736 edittext = []
2731 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2737 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2732 if ctx.description():
2738 if ctx.description():
2733 edittext.append(ctx.description())
2739 edittext.append(ctx.description())
2734 edittext.append("")
2740 edittext.append("")
2735 edittext.append("") # Empty line between message and comments.
2741 edittext.append("") # Empty line between message and comments.
2736 edittext.append(_("HG: Enter commit message."
2742 edittext.append(_("HG: Enter commit message."
2737 " Lines beginning with 'HG:' are removed."))
2743 " Lines beginning with 'HG:' are removed."))
2738 edittext.append("HG: %s" % extramsg)
2744 edittext.append("HG: %s" % extramsg)
2739 edittext.append("HG: --")
2745 edittext.append("HG: --")
2740 edittext.append(_("HG: user: %s") % ctx.user())
2746 edittext.append(_("HG: user: %s") % ctx.user())
2741 if ctx.p2():
2747 if ctx.p2():
2742 edittext.append(_("HG: branch merge"))
2748 edittext.append(_("HG: branch merge"))
2743 if ctx.branch():
2749 if ctx.branch():
2744 edittext.append(_("HG: branch '%s'") % ctx.branch())
2750 edittext.append(_("HG: branch '%s'") % ctx.branch())
2745 if bookmarks.isactivewdirparent(repo):
2751 if bookmarks.isactivewdirparent(repo):
2746 edittext.append(_("HG: bookmark '%s'") % repo._activebookmark)
2752 edittext.append(_("HG: bookmark '%s'") % repo._activebookmark)
2747 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2753 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2748 edittext.extend([_("HG: added %s") % f for f in added])
2754 edittext.extend([_("HG: added %s") % f for f in added])
2749 edittext.extend([_("HG: changed %s") % f for f in modified])
2755 edittext.extend([_("HG: changed %s") % f for f in modified])
2750 edittext.extend([_("HG: removed %s") % f for f in removed])
2756 edittext.extend([_("HG: removed %s") % f for f in removed])
2751 if not added and not modified and not removed:
2757 if not added and not modified and not removed:
2752 edittext.append(_("HG: no files changed"))
2758 edittext.append(_("HG: no files changed"))
2753 edittext.append("")
2759 edittext.append("")
2754
2760
2755 return "\n".join(edittext)
2761 return "\n".join(edittext)
2756
2762
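For orientation, buildcommittext produces editor text of roughly the following shape (the user, branch and file names below are made up):

HG: Enter commit message. Lines beginning with 'HG:' are removed.
HG: Leave message empty to abort commit.
HG: --
HG: user: Jane Doe <jane@example.org>
HG: branch 'default'
HG: added tests/test-example.t
HG: changed mercurial/cmdutil.py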
2757 def commitstatus(repo, node, branch, bheads=None, opts={}):
2763 def commitstatus(repo, node, branch, bheads=None, opts={}):
2758 ctx = repo[node]
2764 ctx = repo[node]
2759 parents = ctx.parents()
2765 parents = ctx.parents()
2760
2766
2761 if (not opts.get('amend') and bheads and node not in bheads and not
2767 if (not opts.get('amend') and bheads and node not in bheads and not
2762 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2768 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2763 repo.ui.status(_('created new head\n'))
2769 repo.ui.status(_('created new head\n'))
2764 # The message is not printed for initial roots. For the other
2770 # The message is not printed for initial roots. For the other
2765 # changesets, it is printed in the following situations:
2771 # changesets, it is printed in the following situations:
2766 #
2772 #
2767 # Par column: for the 2 parents with ...
2773 # Par column: for the 2 parents with ...
2768 # N: null or no parent
2774 # N: null or no parent
2769 # B: parent is on another named branch
2775 # B: parent is on another named branch
2770 # C: parent is a regular non head changeset
2776 # C: parent is a regular non head changeset
2771 # H: parent was a branch head of the current branch
2777 # H: parent was a branch head of the current branch
2772 # Msg column: whether we print "created new head" message
2778 # Msg column: whether we print "created new head" message
2773 # In the following, it is assumed that there already exists some
2779 # In the following, it is assumed that there already exists some
2774 # initial branch heads of the current branch, otherwise nothing is
2780 # initial branch heads of the current branch, otherwise nothing is
2775 # printed anyway.
2781 # printed anyway.
2776 #
2782 #
2777 # Par Msg Comment
2783 # Par Msg Comment
2778 # N N y additional topo root
2784 # N N y additional topo root
2779 #
2785 #
2780 # B N y additional branch root
2786 # B N y additional branch root
2781 # C N y additional topo head
2787 # C N y additional topo head
2782 # H N n usual case
2788 # H N n usual case
2783 #
2789 #
2784 # B B y weird additional branch root
2790 # B B y weird additional branch root
2785 # C B y branch merge
2791 # C B y branch merge
2786 # H B n merge with named branch
2792 # H B n merge with named branch
2787 #
2793 #
2788 # C C y additional head from merge
2794 # C C y additional head from merge
2789 # C H n merge with a head
2795 # C H n merge with a head
2790 #
2796 #
2791 # H H n head merge: head count decreases
2797 # H H n head merge: head count decreases
2792
2798
2793 if not opts.get('close_branch'):
2799 if not opts.get('close_branch'):
2794 for r in parents:
2800 for r in parents:
2795 if r.closesbranch() and r.branch() == branch:
2801 if r.closesbranch() and r.branch() == branch:
2796 repo.ui.status(_('reopening closed branch head %d\n') % r)
2802 repo.ui.status(_('reopening closed branch head %d\n') % r)
2797
2803
2798 if repo.ui.debugflag:
2804 if repo.ui.debugflag:
2799 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2805 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2800 elif repo.ui.verbose:
2806 elif repo.ui.verbose:
2801 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2807 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2802
2808
2803 def revert(ui, repo, ctx, parents, *pats, **opts):
2809 def revert(ui, repo, ctx, parents, *pats, **opts):
2804 parent, p2 = parents
2810 parent, p2 = parents
2805 node = ctx.node()
2811 node = ctx.node()
2806
2812
2807 mf = ctx.manifest()
2813 mf = ctx.manifest()
2808 if node == p2:
2814 if node == p2:
2809 parent = p2
2815 parent = p2
2810 if node == parent:
2816 if node == parent:
2811 pmf = mf
2817 pmf = mf
2812 else:
2818 else:
2813 pmf = None
2819 pmf = None
2814
2820
2815 # need all matching names in dirstate and manifest of target rev,
2821 # need all matching names in dirstate and manifest of target rev,
2816 # so have to walk both. do not print errors if files exist in one
2822 # so have to walk both. do not print errors if files exist in one
2817 # but not the other. in both cases, filesets should be evaluated against
2823 # but not the other. in both cases, filesets should be evaluated against
2818 # workingctx to get consistent result (issue4497). this means 'set:**'
2824 # workingctx to get consistent result (issue4497). this means 'set:**'
2819 # cannot be used to select missing files from target rev.
2825 # cannot be used to select missing files from target rev.
2820
2826
2821 # `names` is a mapping for all elements in working copy and target revision
2827 # `names` is a mapping for all elements in working copy and target revision
2822 # The mapping is in the form:
2828 # The mapping is in the form:
2823 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2829 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2824 names = {}
2830 names = {}
2825
2831
2826 wlock = repo.wlock()
2832 wlock = repo.wlock()
2827 try:
2833 try:
2828 ## filling of the `names` mapping
2834 ## filling of the `names` mapping
2829 # walk dirstate to fill `names`
2835 # walk dirstate to fill `names`
2830
2836
2831 interactive = opts.get('interactive', False)
2837 interactive = opts.get('interactive', False)
2832 wctx = repo[None]
2838 wctx = repo[None]
2833 m = scmutil.match(wctx, pats, opts)
2839 m = scmutil.match(wctx, pats, opts)
2834
2840
2835 # we'll need this later
2841 # we'll need this later
2836 targetsubs = sorted(s for s in wctx.substate if m(s))
2842 targetsubs = sorted(s for s in wctx.substate if m(s))
2837
2843
2838 if not m.always():
2844 if not m.always():
2839 m.bad = lambda x, y: False
2845 m.bad = lambda x, y: False
2840 for abs in repo.walk(m):
2846 for abs in repo.walk(m):
2841 names[abs] = m.rel(abs), m.exact(abs)
2847 names[abs] = m.rel(abs), m.exact(abs)
2842
2848
2843 # walk target manifest to fill `names`
2849 # walk target manifest to fill `names`
2844
2850
2845 def badfn(path, msg):
2851 def badfn(path, msg):
2846 if path in names:
2852 if path in names:
2847 return
2853 return
2848 if path in ctx.substate:
2854 if path in ctx.substate:
2849 return
2855 return
2850 path_ = path + '/'
2856 path_ = path + '/'
2851 for f in names:
2857 for f in names:
2852 if f.startswith(path_):
2858 if f.startswith(path_):
2853 return
2859 return
2854 ui.warn("%s: %s\n" % (m.rel(path), msg))
2860 ui.warn("%s: %s\n" % (m.rel(path), msg))
2855
2861
2856 m.bad = badfn
2862 m.bad = badfn
2857 for abs in ctx.walk(m):
2863 for abs in ctx.walk(m):
2858 if abs not in names:
2864 if abs not in names:
2859 names[abs] = m.rel(abs), m.exact(abs)
2865 names[abs] = m.rel(abs), m.exact(abs)
2860
2866
2861 # Find status of all files in `names`.
2867 # Find status of all files in `names`.
2862 m = scmutil.matchfiles(repo, names)
2868 m = scmutil.matchfiles(repo, names)
2863
2869
2864 changes = repo.status(node1=node, match=m,
2870 changes = repo.status(node1=node, match=m,
2865 unknown=True, ignored=True, clean=True)
2871 unknown=True, ignored=True, clean=True)
2866 else:
2872 else:
2867 changes = repo.status(node1=node, match=m)
2873 changes = repo.status(node1=node, match=m)
2868 for kind in changes:
2874 for kind in changes:
2869 for abs in kind:
2875 for abs in kind:
2870 names[abs] = m.rel(abs), m.exact(abs)
2876 names[abs] = m.rel(abs), m.exact(abs)
2871
2877
2872 m = scmutil.matchfiles(repo, names)
2878 m = scmutil.matchfiles(repo, names)
2873
2879
2874 modified = set(changes.modified)
2880 modified = set(changes.modified)
2875 added = set(changes.added)
2881 added = set(changes.added)
2876 removed = set(changes.removed)
2882 removed = set(changes.removed)
2877 _deleted = set(changes.deleted)
2883 _deleted = set(changes.deleted)
2878 unknown = set(changes.unknown)
2884 unknown = set(changes.unknown)
2879 unknown.update(changes.ignored)
2885 unknown.update(changes.ignored)
2880 clean = set(changes.clean)
2886 clean = set(changes.clean)
2881 modadded = set()
2887 modadded = set()
2882
2888
2883 # split between files known in target manifest and the others
2889 # split between files known in target manifest and the others
2884 smf = set(mf)
2890 smf = set(mf)
2885
2891
2886 # determine the exact nature of the deleted files
2892 # determine the exact nature of the deleted files
2887 deladded = _deleted - smf
2893 deladded = _deleted - smf
2888 deleted = _deleted - deladded
2894 deleted = _deleted - deladded
2889
2895
2890 # We need to account for the state of the file in the dirstate,
2896 # We need to account for the state of the file in the dirstate,
2891 # even when we revert against something other than the parent. This will
2897 # even when we revert against something other than the parent. This will
2892 # slightly alter the behavior of revert (doing a backup or not, delete
2898 # slightly alter the behavior of revert (doing a backup or not, delete
2893 # or just forget etc).
2899 # or just forget etc).
2894 if parent == node:
2900 if parent == node:
2895 dsmodified = modified
2901 dsmodified = modified
2896 dsadded = added
2902 dsadded = added
2897 dsremoved = removed
2903 dsremoved = removed
2898 # store all local modifications, useful later for rename detection
2904 # store all local modifications, useful later for rename detection
2899 localchanges = dsmodified | dsadded
2905 localchanges = dsmodified | dsadded
2900 modified, added, removed = set(), set(), set()
2906 modified, added, removed = set(), set(), set()
2901 else:
2907 else:
2902 changes = repo.status(node1=parent, match=m)
2908 changes = repo.status(node1=parent, match=m)
2903 dsmodified = set(changes.modified)
2909 dsmodified = set(changes.modified)
2904 dsadded = set(changes.added)
2910 dsadded = set(changes.added)
2905 dsremoved = set(changes.removed)
2911 dsremoved = set(changes.removed)
2906 # store all local modifications, useful later for rename detection
2912 # store all local modifications, useful later for rename detection
2907 localchanges = dsmodified | dsadded
2913 localchanges = dsmodified | dsadded
2908
2914
2909 # only take removes between wc and target into account
2915 # only take removes between wc and target into account
2910 clean |= dsremoved - removed
2916 clean |= dsremoved - removed
2911 dsremoved &= removed
2917 dsremoved &= removed
2912 # distinguish between dirstate removes and the others
2918 # distinguish between dirstate removes and the others
2913 removed -= dsremoved
2919 removed -= dsremoved
2914
2920
2915 modadded = added & dsmodified
2921 modadded = added & dsmodified
2916 added -= modadded
2922 added -= modadded
2917
2923
2918 # tell newly modified files apart.
2924 # tell newly modified files apart.
2919 dsmodified &= modified
2925 dsmodified &= modified
2920 dsmodified |= modified & dsadded # dirstate added may need backup
2926 dsmodified |= modified & dsadded # dirstate added may need backup
2921 modified -= dsmodified
2927 modified -= dsmodified
2922
2928
2923 # We need to wait for some post-processing to update this set
2929 # We need to wait for some post-processing to update this set
2924 # before making the distinction. The dirstate will be used for
2930 # before making the distinction. The dirstate will be used for
2925 # that purpose.
2931 # that purpose.
2926 dsadded = added
2932 dsadded = added
2927
2933
2928 # in case of merge, files that are actually added can be reported as
2934 # in case of merge, files that are actually added can be reported as
2929 # modified; we need to post-process the result
2935 # modified; we need to post-process the result
2930 if p2 != nullid:
2936 if p2 != nullid:
2931 if pmf is None:
2937 if pmf is None:
2932 # only need parent manifest in the merge case,
2938 # only need parent manifest in the merge case,
2933 # so do not read by default
2939 # so do not read by default
2934 pmf = repo[parent].manifest()
2940 pmf = repo[parent].manifest()
2935 mergeadd = dsmodified - set(pmf)
2941 mergeadd = dsmodified - set(pmf)
2936 dsadded |= mergeadd
2942 dsadded |= mergeadd
2937 dsmodified -= mergeadd
2943 dsmodified -= mergeadd
2938
2944
2939 # if f is a rename, update `names` to also revert the source
2945 # if f is a rename, update `names` to also revert the source
2940 cwd = repo.getcwd()
2946 cwd = repo.getcwd()
2941 for f in localchanges:
2947 for f in localchanges:
2942 src = repo.dirstate.copied(f)
2948 src = repo.dirstate.copied(f)
2943 # XXX should we check for rename down to target node?
2949 # XXX should we check for rename down to target node?
2944 if src and src not in names and repo.dirstate[src] == 'r':
2950 if src and src not in names and repo.dirstate[src] == 'r':
2945 dsremoved.add(src)
2951 dsremoved.add(src)
2946 names[src] = (repo.pathto(src, cwd), True)
2952 names[src] = (repo.pathto(src, cwd), True)
2947
2953
2948 # distinguish between files to forget and the others
2954 # distinguish between files to forget and the others
2949 added = set()
2955 added = set()
2950 for abs in dsadded:
2956 for abs in dsadded:
2951 if repo.dirstate[abs] != 'a':
2957 if repo.dirstate[abs] != 'a':
2952 added.add(abs)
2958 added.add(abs)
2953 dsadded -= added
2959 dsadded -= added
2954
2960
2955 for abs in deladded:
2961 for abs in deladded:
2956 if repo.dirstate[abs] == 'a':
2962 if repo.dirstate[abs] == 'a':
2957 dsadded.add(abs)
2963 dsadded.add(abs)
2958 deladded -= dsadded
2964 deladded -= dsadded
2959
2965
2960 # For files marked as removed, we check if an unknown file is present at
2966 # For files marked as removed, we check if an unknown file is present at
2961 # the same path. If such a file exists it may need to be backed up.
2967 # the same path. If such a file exists it may need to be backed up.
2962 # Making the distinction at this stage helps keep the backup logic
2968 # Making the distinction at this stage helps keep the backup logic
2963 # simpler.
2969 # simpler.
2964 removunk = set()
2970 removunk = set()
2965 for abs in removed:
2971 for abs in removed:
2966 target = repo.wjoin(abs)
2972 target = repo.wjoin(abs)
2967 if os.path.lexists(target):
2973 if os.path.lexists(target):
2968 removunk.add(abs)
2974 removunk.add(abs)
2969 removed -= removunk
2975 removed -= removunk
2970
2976
2971 dsremovunk = set()
2977 dsremovunk = set()
2972 for abs in dsremoved:
2978 for abs in dsremoved:
2973 target = repo.wjoin(abs)
2979 target = repo.wjoin(abs)
2974 if os.path.lexists(target):
2980 if os.path.lexists(target):
2975 dsremovunk.add(abs)
2981 dsremovunk.add(abs)
2976 dsremoved -= dsremovunk
2982 dsremoved -= dsremovunk
2977
2983
2978 # actions to be actually performed by revert
2984 # actions to be actually performed by revert
2979 # (<list of files>, <message>) tuple
2985 # (<list of files>, <message>) tuple
2980 actions = {'revert': ([], _('reverting %s\n')),
2986 actions = {'revert': ([], _('reverting %s\n')),
2981 'add': ([], _('adding %s\n')),
2987 'add': ([], _('adding %s\n')),
2982 'remove': ([], _('removing %s\n')),
2988 'remove': ([], _('removing %s\n')),
2983 'drop': ([], _('removing %s\n')),
2989 'drop': ([], _('removing %s\n')),
2984 'forget': ([], _('forgetting %s\n')),
2990 'forget': ([], _('forgetting %s\n')),
2985 'undelete': ([], _('undeleting %s\n')),
2991 'undelete': ([], _('undeleting %s\n')),
2986 'noop': (None, _('no changes needed to %s\n')),
2992 'noop': (None, _('no changes needed to %s\n')),
2987 'unknown': (None, _('file not managed: %s\n')),
2993 'unknown': (None, _('file not managed: %s\n')),
2988 }
2994 }
2989
2995
2990 # "constants" that convey the backup strategy.
2996 # "constants" that convey the backup strategy.
2991 # All set to `discard` if `no-backup` is set to avoid checking
2997 # All set to `discard` if `no-backup` is set to avoid checking
2992 # no_backup lower in the code.
2998 # no_backup lower in the code.
2993 # These values are ordered for comparison purposes
2999 # These values are ordered for comparison purposes
2994 backup = 2 # unconditionally do backup
3000 backup = 2 # unconditionally do backup
2995 check = 1 # check if the existing file differs from target
3001 check = 1 # check if the existing file differs from target
2996 discard = 0 # never do backup
3002 discard = 0 # never do backup
2997 if opts.get('no_backup'):
3003 if opts.get('no_backup'):
2998 backup = check = discard
3004 backup = check = discard
2999
3005
3000 backupanddel = actions['remove']
3006 backupanddel = actions['remove']
3001 if not opts.get('no_backup'):
3007 if not opts.get('no_backup'):
3002 backupanddel = actions['drop']
3008 backupanddel = actions['drop']
3003
3009
3004 disptable = (
3010 disptable = (
3005 # dispatch table:
3011 # dispatch table:
3006 # file state
3012 # file state
3007 # action
3013 # action
3008 # make backup
3014 # make backup
3009
3015
3010 ## Sets that result in changes to files on disk
3016 ## Sets that result in changes to files on disk
3011 # Modified compared to target, no local change
3017 # Modified compared to target, no local change
3012 (modified, actions['revert'], discard),
3018 (modified, actions['revert'], discard),
3013 # Modified compared to target, but local file is deleted
3019 # Modified compared to target, but local file is deleted
3014 (deleted, actions['revert'], discard),
3020 (deleted, actions['revert'], discard),
3015 # Modified compared to target, local change
3021 # Modified compared to target, local change
3016 (dsmodified, actions['revert'], backup),
3022 (dsmodified, actions['revert'], backup),
3017 # Added since target
3023 # Added since target
3018 (added, actions['remove'], discard),
3024 (added, actions['remove'], discard),
3019 # Added in working directory
3025 # Added in working directory
3020 (dsadded, actions['forget'], discard),
3026 (dsadded, actions['forget'], discard),
3021 # Added since target, have local modification
3027 # Added since target, have local modification
3022 (modadded, backupanddel, backup),
3028 (modadded, backupanddel, backup),
3023 # Added since target but file is missing in working directory
3029 # Added since target but file is missing in working directory
3024 (deladded, actions['drop'], discard),
3030 (deladded, actions['drop'], discard),
3025 # Removed since target, before working copy parent
3031 # Removed since target, before working copy parent
3026 (removed, actions['add'], discard),
3032 (removed, actions['add'], discard),
3027 # Same as `removed` but an unknown file exists at the same path
3033 # Same as `removed` but an unknown file exists at the same path
3028 (removunk, actions['add'], check),
3034 (removunk, actions['add'], check),
3029 # Removed since targe, marked as such in working copy parent
3035 # Removed since targe, marked as such in working copy parent
3030 (dsremoved, actions['undelete'], discard),
3036 (dsremoved, actions['undelete'], discard),
3031 # Same as `dsremoved` but an unknown file exists at the same path
3037 # Same as `dsremoved` but an unknown file exists at the same path
3032 (dsremovunk, actions['undelete'], check),
3038 (dsremovunk, actions['undelete'], check),
3033 ## the following sets do not result in any file changes
3039 ## the following sets do not result in any file changes
3034 # File with no modification
3040 # File with no modification
3035 (clean, actions['noop'], discard),
3041 (clean, actions['noop'], discard),
3036 # Existing file, not tracked anywhere
3042 # Existing file, not tracked anywhere
3037 (unknown, actions['unknown'], discard),
3043 (unknown, actions['unknown'], discard),
3038 )
3044 )
3039
3045
3040 for abs, (rel, exact) in sorted(names.items()):
3046 for abs, (rel, exact) in sorted(names.items()):
3041 # target file to be touched on disk (relative to cwd)
3047 # target file to be touched on disk (relative to cwd)
3042 target = repo.wjoin(abs)
3048 target = repo.wjoin(abs)
3043 # search the entry in the dispatch table.
3049 # search the entry in the dispatch table.
3044 # if the file is in any of these sets, it was touched in the working
3050 # if the file is in any of these sets, it was touched in the working
3045 # directory parent and we are sure it needs to be reverted.
3051 # directory parent and we are sure it needs to be reverted.
3046 for table, (xlist, msg), dobackup in disptable:
3052 for table, (xlist, msg), dobackup in disptable:
3047 if abs not in table:
3053 if abs not in table:
3048 continue
3054 continue
3049 if xlist is not None:
3055 if xlist is not None:
3050 xlist.append(abs)
3056 xlist.append(abs)
3051 if dobackup and (backup <= dobackup
3057 if dobackup and (backup <= dobackup
3052 or wctx[abs].cmp(ctx[abs])):
3058 or wctx[abs].cmp(ctx[abs])):
3053 bakname = "%s.orig" % rel
3059 bakname = "%s.orig" % rel
3054 ui.note(_('saving current version of %s as %s\n') %
3060 ui.note(_('saving current version of %s as %s\n') %
3055 (rel, bakname))
3061 (rel, bakname))
3056 if not opts.get('dry_run'):
3062 if not opts.get('dry_run'):
3057 if interactive:
3063 if interactive:
3058 util.copyfile(target, bakname)
3064 util.copyfile(target, bakname)
3059 else:
3065 else:
3060 util.rename(target, bakname)
3066 util.rename(target, bakname)
3061 if ui.verbose or not exact:
3067 if ui.verbose or not exact:
3062 if not isinstance(msg, basestring):
3068 if not isinstance(msg, basestring):
3063 msg = msg(abs)
3069 msg = msg(abs)
3064 ui.status(msg % rel)
3070 ui.status(msg % rel)
3065 elif exact:
3071 elif exact:
3066 ui.warn(msg % rel)
3072 ui.warn(msg % rel)
3067 break
3073 break
3068
3074
3069 if not opts.get('dry_run'):
3075 if not opts.get('dry_run'):
3070 needdata = ('revert', 'add', 'undelete')
3076 needdata = ('revert', 'add', 'undelete')
3071 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3077 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3072 _performrevert(repo, parents, ctx, actions, interactive)
3078 _performrevert(repo, parents, ctx, actions, interactive)
3073
3079
3074 if targetsubs:
3080 if targetsubs:
3075 # Revert the subrepos on the revert list
3081 # Revert the subrepos on the revert list
3076 for sub in targetsubs:
3082 for sub in targetsubs:
3077 try:
3083 try:
3078 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3084 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3079 except KeyError:
3085 except KeyError:
3080 raise util.Abort("subrepository '%s' does not exist in %s!"
3086 raise util.Abort("subrepository '%s' does not exist in %s!"
3081 % (sub, short(ctx.node())))
3087 % (sub, short(ctx.node())))
3082 finally:
3088 finally:
3083 wlock.release()
3089 wlock.release()
3084
3090
3085 def _revertprefetch(repo, ctx, *files):
3091 def _revertprefetch(repo, ctx, *files):
3086 """Let extensions changing the storage layer prefetch content"""
3092 """Let extensions changing the storage layer prefetch content"""
3087 pass
3093 pass
3088
3094
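A sketch of how a storage extension could hook this prefetch point, assuming mercurial.extensions.wrapfunction is available as usual for extensions; the fetchfromremote helper is hypothetical:

from mercurial import cmdutil, extensions

def _prefetchwrapper(orig, repo, ctx, *files):
    # warm the remote store before revert starts touching files
    fetchfromremote(repo, ctx, [f for flist in files for f in flist])
    return orig(repo, ctx, *files)

def extsetup(ui):
    extensions.wrapfunction(cmdutil, '_revertprefetch', _prefetchwrapper)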
3089 def _performrevert(repo, parents, ctx, actions, interactive=False):
3095 def _performrevert(repo, parents, ctx, actions, interactive=False):
3090 """function that actually performs all the actions computed for revert
3096 """function that actually performs all the actions computed for revert
3091
3097
3092 This is an independent function to let extensions plug in and react to
3098 This is an independent function to let extensions plug in and react to
3093 the imminent revert.
3099 the imminent revert.
3094
3100
3095 Make sure you have the working directory locked when calling this function.
3101 Make sure you have the working directory locked when calling this function.
3096 """
3102 """
3097 parent, p2 = parents
3103 parent, p2 = parents
3098 node = ctx.node()
3104 node = ctx.node()
3099 def checkout(f):
3105 def checkout(f):
3100 fc = ctx[f]
3106 fc = ctx[f]
3101 return repo.wwrite(f, fc.data(), fc.flags())
3107 return repo.wwrite(f, fc.data(), fc.flags())
3102
3108
3103 audit_path = pathutil.pathauditor(repo.root)
3109 audit_path = pathutil.pathauditor(repo.root)
3104 for f in actions['forget'][0]:
3110 for f in actions['forget'][0]:
3105 repo.dirstate.drop(f)
3111 repo.dirstate.drop(f)
3106 for f in actions['remove'][0]:
3112 for f in actions['remove'][0]:
3107 audit_path(f)
3113 audit_path(f)
3108 try:
3114 try:
3109 util.unlinkpath(repo.wjoin(f))
3115 util.unlinkpath(repo.wjoin(f))
3110 except OSError:
3116 except OSError:
3111 pass
3117 pass
3112 repo.dirstate.remove(f)
3118 repo.dirstate.remove(f)
3113 for f in actions['drop'][0]:
3119 for f in actions['drop'][0]:
3114 audit_path(f)
3120 audit_path(f)
3115 repo.dirstate.remove(f)
3121 repo.dirstate.remove(f)
3116
3122
3117 normal = None
3123 normal = None
3118 if node == parent:
3124 if node == parent:
3119 # We're reverting to our parent. If possible, we'd like status
3125 # We're reverting to our parent. If possible, we'd like status
3120 # to report the file as clean. We have to use normallookup for
3126 # to report the file as clean. We have to use normallookup for
3121 # merges to avoid losing information about merged/dirty files.
3127 # merges to avoid losing information about merged/dirty files.
3122 if p2 != nullid:
3128 if p2 != nullid:
3123 normal = repo.dirstate.normallookup
3129 normal = repo.dirstate.normallookup
3124 else:
3130 else:
3125 normal = repo.dirstate.normal
3131 normal = repo.dirstate.normal
3126
3132
3127 newlyaddedandmodifiedfiles = set()
3133 newlyaddedandmodifiedfiles = set()
3128 if interactive:
3134 if interactive:
3129 # Prompt the user for changes to revert
3135 # Prompt the user for changes to revert
3130 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3136 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3131 m = scmutil.match(ctx, torevert, {})
3137 m = scmutil.match(ctx, torevert, {})
3132 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3138 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3133 diffopts.nodates = True
3139 diffopts.nodates = True
3134 diffopts.git = True
3140 diffopts.git = True
3135 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3141 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3136 originalchunks = patch.parsepatch(diff)
3142 originalchunks = patch.parsepatch(diff)
3137 try:
3143 try:
3138 chunks = recordfilter(repo.ui, originalchunks)
3144 chunks = recordfilter(repo.ui, originalchunks)
3139 except patch.PatchError, err:
3145 except patch.PatchError, err:
3140 raise util.Abort(_('error parsing patch: %s') % err)
3146 raise util.Abort(_('error parsing patch: %s') % err)
3141
3147
3142 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3148 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3143 # Apply changes
3149 # Apply changes
3144 fp = cStringIO.StringIO()
3150 fp = cStringIO.StringIO()
3145 for c in chunks:
3151 for c in chunks:
3146 c.write(fp)
3152 c.write(fp)
3147 dopatch = fp.tell()
3153 dopatch = fp.tell()
3148 fp.seek(0)
3154 fp.seek(0)
3149 if dopatch:
3155 if dopatch:
3150 try:
3156 try:
3151 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3157 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3152 except patch.PatchError, err:
3158 except patch.PatchError, err:
3153 raise util.Abort(str(err))
3159 raise util.Abort(str(err))
3154 del fp
3160 del fp
3155 else:
3161 else:
3156 for f in actions['revert'][0]:
3162 for f in actions['revert'][0]:
3157 wsize = checkout(f)
3163 wsize = checkout(f)
3158 if normal:
3164 if normal:
3159 normal(f)
3165 normal(f)
3160 elif wsize == repo.dirstate._map[f][2]:
3166 elif wsize == repo.dirstate._map[f][2]:
3161 # changes may be overlooked without normallookup,
3167 # changes may be overlooked without normallookup,
3162 # if the size isn't changed by reverting
3168 # if the size isn't changed by reverting
3163 repo.dirstate.normallookup(f)
3169 repo.dirstate.normallookup(f)
3164
3170
3165 for f in actions['add'][0]:
3171 for f in actions['add'][0]:
3166 # Don't check out modified files; they are already created by the diff
3172 # Don't check out modified files; they are already created by the diff
3167 if f not in newlyaddedandmodifiedfiles:
3173 if f not in newlyaddedandmodifiedfiles:
3168 checkout(f)
3174 checkout(f)
3169 repo.dirstate.add(f)
3175 repo.dirstate.add(f)
3170
3176
3171 normal = repo.dirstate.normallookup
3177 normal = repo.dirstate.normallookup
3172 if node == parent and p2 == nullid:
3178 if node == parent and p2 == nullid:
3173 normal = repo.dirstate.normal
3179 normal = repo.dirstate.normal
3174 for f in actions['undelete'][0]:
3180 for f in actions['undelete'][0]:
3175 checkout(f)
3181 checkout(f)
3176 normal(f)
3182 normal(f)
3177
3183
3178 copied = copies.pathcopies(repo[parent], ctx)
3184 copied = copies.pathcopies(repo[parent], ctx)
3179
3185
3180 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3186 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3181 if f in copied:
3187 if f in copied:
3182 repo.dirstate.copy(copied[f], f)
3188 repo.dirstate.copy(copied[f], f)
3183
3189
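A minimal sketch of the actions mapping consumed above, using a hypothetical file name; callers normally get this mapping built by revert() and must hold the wlock:

actions = {'revert':   (['modified-file.txt'], _('reverting %s\n')),
           'add':      ([], _('adding %s\n')),
           'remove':   ([], _('removing %s\n')),
           'drop':     ([], _('removing %s\n')),
           'forget':   ([], _('forgetting %s\n')),
           'undelete': ([], _('undeleting %s\n'))}
# with the wlock held, revert the listed file against the working parent
_performrevert(repo, repo.dirstate.parents(), repo['.'], actions)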
3184 def command(table):
3190 def command(table):
3185 """Returns a function object to be used as a decorator for making commands.
3191 """Returns a function object to be used as a decorator for making commands.
3186
3192
3187 This function receives a command table as its argument. The table should
3193 This function receives a command table as its argument. The table should
3188 be a dict.
3194 be a dict.
3189
3195
3190 The returned function can be used as a decorator for adding commands
3196 The returned function can be used as a decorator for adding commands
3191 to that command table. This function accepts multiple arguments to define
3197 to that command table. This function accepts multiple arguments to define
3192 a command.
3198 a command.
3193
3199
3194 The first argument is the command name.
3200 The first argument is the command name.
3195
3201
3196 The options argument is an iterable of tuples defining command arguments.
3202 The options argument is an iterable of tuples defining command arguments.
3197 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3203 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3198
3204
3199 The synopsis argument defines a short, one line summary of how to use the
3205 The synopsis argument defines a short, one line summary of how to use the
3200 command. This shows up in the help output.
3206 command. This shows up in the help output.
3201
3207
3202 The norepo argument defines whether the command does not require a
3208 The norepo argument defines whether the command does not require a
3203 local repository. Most commands operate against a repository, thus the
3209 local repository. Most commands operate against a repository, thus the
3204 default is False.
3210 default is False.
3205
3211
3206 The optionalrepo argument defines whether the command optionally requires
3212 The optionalrepo argument defines whether the command optionally requires
3207 a local repository.
3213 a local repository.
3208
3214
3209 The inferrepo argument defines whether to try to find a repository from the
3215 The inferrepo argument defines whether to try to find a repository from the
3210 command line arguments. If True, arguments will be examined for potential
3216 command line arguments. If True, arguments will be examined for potential
3211 repository locations. See ``findrepo()``. If a repository is found, it
3217 repository locations. See ``findrepo()``. If a repository is found, it
3212 will be used.
3218 will be used.
3213 """
3219 """
3214 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3220 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3215 inferrepo=False):
3221 inferrepo=False):
3216 def decorator(func):
3222 def decorator(func):
3217 if synopsis:
3223 if synopsis:
3218 table[name] = func, list(options), synopsis
3224 table[name] = func, list(options), synopsis
3219 else:
3225 else:
3220 table[name] = func, list(options)
3226 table[name] = func, list(options)
3221
3227
3222 if norepo:
3228 if norepo:
3223 # Avoid import cycle.
3229 # Avoid import cycle.
3224 import commands
3230 import commands
3225 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3231 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3226
3232
3227 if optionalrepo:
3233 if optionalrepo:
3228 import commands
3234 import commands
3229 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3235 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3230
3236
3231 if inferrepo:
3237 if inferrepo:
3232 import commands
3238 import commands
3233 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3239 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3234
3240
3235 return func
3241 return func
3236 return decorator
3242 return decorator
3237
3243
3238 return cmd
3244 return cmd
3239
3245
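A minimal sketch of the decorator from an extension's point of view (the command name and option below are made up):

from mercurial import cmdutil
from mercurial.i18n import _

cmdtable = {}
command = cmdutil.command(cmdtable)

@command('hello',
         [('g', 'greeting', 'Hello', _('greeting to use'))],
         _('hg hello [NAME]'),
         norepo=True)
def hello(ui, name='world', **opts):
    # invoked as 'hg hello [NAME]'; no local repository is required
    ui.write('%s, %s!\n' % (opts['greeting'], name))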
3240 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3246 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3241 # commands.outgoing. "missing" is "missing" of the result of
3247 # commands.outgoing. "missing" is "missing" of the result of
3242 # "findcommonoutgoing()"
3248 # "findcommonoutgoing()"
3243 outgoinghooks = util.hooks()
3249 outgoinghooks = util.hooks()
3244
3250
3245 # a list of (ui, repo) functions called by commands.summary
3251 # a list of (ui, repo) functions called by commands.summary
3246 summaryhooks = util.hooks()
3252 summaryhooks = util.hooks()
3247
3253
3248 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3254 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3249 #
3255 #
3250 # functions should return tuple of booleans below, if 'changes' is None:
3256 # functions should return tuple of booleans below, if 'changes' is None:
3251 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3257 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3252 #
3258 #
3253 # otherwise, 'changes' is a tuple of tuples below:
3259 # otherwise, 'changes' is a tuple of tuples below:
3254 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3260 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3255 # - (desturl, destbranch, destpeer, outgoing)
3261 # - (desturl, destbranch, destpeer, outgoing)
3256 summaryremotehooks = util.hooks()
3262 summaryremotehooks = util.hooks()
3257
3263
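A sketch of how an extension registers one of these hooks, assuming util.hooks exposes the usual add(source, hook) method; the extension name and repo attribute are made up:

from mercurial import cmdutil
from mercurial.i18n import _

def summaryhook(ui, repo):
    # extra line shown at the end of 'hg summary'
    ui.write(_('myext: %d items tracked\n') % len(repo.myextitems))

def extsetup(ui):
    cmdutil.summaryhooks.add('myext', summaryhook)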
3258 # A list of state files kept by multistep operations like graft.
3264 # A list of state files kept by multistep operations like graft.
3259 # Since graft cannot be aborted, it is considered 'clearable' by update.
3265 # Since graft cannot be aborted, it is considered 'clearable' by update.
3260 # note: bisect is intentionally excluded
3266 # note: bisect is intentionally excluded
3261 # (state file, clearable, allowcommit, error, hint)
3267 # (state file, clearable, allowcommit, error, hint)
3262 unfinishedstates = [
3268 unfinishedstates = [
3263 ('graftstate', True, False, _('graft in progress'),
3269 ('graftstate', True, False, _('graft in progress'),
3264 _("use 'hg graft --continue' or 'hg update' to abort")),
3270 _("use 'hg graft --continue' or 'hg update' to abort")),
3265 ('updatestate', True, False, _('last update was interrupted'),
3271 ('updatestate', True, False, _('last update was interrupted'),
3266 _("use 'hg update' to get a consistent checkout"))
3272 _("use 'hg update' to get a consistent checkout"))
3267 ]
3273 ]
3268
3274
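An extension that adds its own multistep command can append an entry here so that commit and update refuse to run mid-operation; the 'frobstate' file name and hint below are made up:

from mercurial import cmdutil
from mercurial.i18n import _

cmdutil.unfinishedstates.append(
    ('frobstate', True, False, _('frobnicate in progress'),
     _("use 'hg frobnicate --continue' or 'hg frobnicate --abort'")))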
3269 def checkunfinished(repo, commit=False):
3275 def checkunfinished(repo, commit=False):
3270 '''Look for an unfinished multistep operation, like graft, and abort
3276 '''Look for an unfinished multistep operation, like graft, and abort
3271 if found. It's probably good to check this right before
3277 if found. It's probably good to check this right before
3272 bailifchanged().
3278 bailifchanged().
3273 '''
3279 '''
3274 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3280 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3275 if commit and allowcommit:
3281 if commit and allowcommit:
3276 continue
3282 continue
3277 if repo.vfs.exists(f):
3283 if repo.vfs.exists(f):
3278 raise util.Abort(msg, hint=hint)
3284 raise util.Abort(msg, hint=hint)
3279
3285
3280 def clearunfinished(repo):
3286 def clearunfinished(repo):
3281 '''Check for unfinished operations (as above), and clear the ones
3287 '''Check for unfinished operations (as above), and clear the ones
3282 that are clearable.
3288 that are clearable.
3283 '''
3289 '''
3284 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3290 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3285 if not clearable and repo.vfs.exists(f):
3291 if not clearable and repo.vfs.exists(f):
3286 raise util.Abort(msg, hint=hint)
3292 raise util.Abort(msg, hint=hint)
3287 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3293 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3288 if clearable and repo.vfs.exists(f):
3294 if clearable and repo.vfs.exists(f):
3289 util.unlink(repo.join(f))
3295 util.unlink(repo.join(f))
3290
3296
3291 class dirstateguard(object):
3297 class dirstateguard(object):
3292 '''Restore dirstate at unexpected failure.
3298 '''Restore dirstate at unexpected failure.
3293
3299
3294 At the construction, this class does:
3300 At the construction, this class does:
3295
3301
3296 - write current ``repo.dirstate`` out, and
3302 - write current ``repo.dirstate`` out, and
3297 - save ``.hg/dirstate`` into the backup file
3303 - save ``.hg/dirstate`` into the backup file
3298
3304
3299 This restores ``.hg/dirstate`` from backup file, if ``release()``
3305 This restores ``.hg/dirstate`` from backup file, if ``release()``
3300 is invoked before ``close()``.
3306 is invoked before ``close()``.
3301
3307
3302 If ``close()`` is invoked before ``release()``, this just removes the backup file.
3308 If ``close()`` is invoked before ``release()``, this just removes the backup file.
3303 '''
3309 '''
3304
3310
3305 def __init__(self, repo, name):
3311 def __init__(self, repo, name):
3306 repo.dirstate.write()
3312 repo.dirstate.write()
3307 self._repo = repo
3313 self._repo = repo
3308 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3314 self._filename = 'dirstate.backup.%s.%d' % (name, id(self))
3309 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3315 repo.vfs.write(self._filename, repo.vfs.tryread('dirstate'))
3310 self._active = True
3316 self._active = True
3311 self._closed = False
3317 self._closed = False
3312
3318
3313 def __del__(self):
3319 def __del__(self):
3314 if self._active: # still active
3320 if self._active: # still active
3315 # this may occur, even if this class is used correctly:
3321 # this may occur, even if this class is used correctly:
3316 # for example, releasing other resources like transaction
3322 # for example, releasing other resources like transaction
3317 # may raise an exception before ``dirstateguard.release`` in
3323 # may raise an exception before ``dirstateguard.release`` in
3318 # ``release(tr, ....)``.
3324 # ``release(tr, ....)``.
3319 self._abort()
3325 self._abort()
3320
3326
3321 def close(self):
3327 def close(self):
3322 if not self._active: # already inactivated
3328 if not self._active: # already inactivated
3323 msg = (_("can't close already inactivated backup: %s")
3329 msg = (_("can't close already inactivated backup: %s")
3324 % self._filename)
3330 % self._filename)
3325 raise util.Abort(msg)
3331 raise util.Abort(msg)
3326
3332
3327 self._repo.vfs.unlink(self._filename)
3333 self._repo.vfs.unlink(self._filename)
3328 self._active = False
3334 self._active = False
3329 self._closed = True
3335 self._closed = True
3330
3336
3331 def _abort(self):
3337 def _abort(self):
3332 # this "invalidate()" prevents "wlock.release()" from writing
3338 # this "invalidate()" prevents "wlock.release()" from writing
3333 # changes of dirstate out after restoring to original status
3339 # changes of dirstate out after restoring to original status
3334 self._repo.dirstate.invalidate()
3340 self._repo.dirstate.invalidate()
3335
3341
3336 self._repo.vfs.rename(self._filename, 'dirstate')
3342 self._repo.vfs.rename(self._filename, 'dirstate')
3337 self._active = False
3343 self._active = False
3338
3344
3339 def release(self):
3345 def release(self):
3340 if not self._closed:
3346 if not self._closed:
3341 if not self._active: # already inactivated
3347 if not self._active: # already inactivated
3342 msg = (_("can't release already inactivated backup: %s")
3348 msg = (_("can't release already inactivated backup: %s")
3343 % self._filename)
3349 % self._filename)
3344 raise util.Abort(msg)
3350 raise util.Abort(msg)
3345 self._abort()
3351 self._abort()
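A minimal sketch of the intended protocol, mirroring how the amend code earlier in this file pairs the guard with locks; 'myoperation' and rewritedirstate are placeholders:

wlock = repo.wlock()
try:
    dsguard = dirstateguard(repo, 'myoperation')
    try:
        rewritedirstate(repo)   # risky work that may leave dirstate broken
        dsguard.close()         # success: just drop the backup
    finally:
        # if close() was not reached, this restores .hg/dirstate
        lockmod.release(dsguard)
finally:
    lockmod.release(wlock)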
@@ -1,1601 +1,1601 b''
1 # stuff related specifically to patch manipulation / parsing
1 # stuff related specifically to patch manipulation / parsing
2 #
2 #
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
3 # Copyright 2008 Mark Edgington <edgimar@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # This code is based on Mark Edgington's crecord extension.
8 # This code is based on Mark Edgington's crecord extension.
9 # (Itself based on Bryan O'Sullivan's record extension.)
9 # (Itself based on Bryan O'Sullivan's record extension.)
10
10
11 from i18n import _
11 from i18n import _
12 import patch as patchmod
12 import patch as patchmod
13 import util, encoding
13 import util, encoding
14
14
15 import os, re, sys, struct, signal, tempfile, locale, cStringIO
15 import os, re, sys, struct, signal, tempfile, locale, cStringIO
16
16
17 # This is required for ncurses to display non-ASCII characters in default user
17 # This is required for ncurses to display non-ASCII characters in default user
18 # locale encoding correctly. --immerrr
18 # locale encoding correctly. --immerrr
19 locale.setlocale(locale.LC_ALL, '')
19 locale.setlocale(locale.LC_ALL, '')
20
20
21 # os.name is one of: 'posix', 'nt', 'dos', 'os2', 'mac', or 'ce'
21 # os.name is one of: 'posix', 'nt', 'dos', 'os2', 'mac', or 'ce'
22 if os.name == 'posix':
22 if os.name == 'posix':
23 import curses
23 import curses
24 import fcntl, termios
24 import fcntl, termios
25 else:
25 else:
26 # I have no idea if wcurses works with crecord...
26 # I have no idea if wcurses works with crecord...
27 try:
27 try:
28 import wcurses as curses
28 import wcurses as curses
29 except ImportError:
29 except ImportError:
30 # wcurses is not shipped on Windows by default
30 # wcurses is not shipped on Windows by default
31 pass
31 pass
32
32
33 try:
33 try:
34 curses
34 curses
35 except NameError:
35 except NameError:
36 if os.name != 'nt': # Temporary hack to get running on Windows again
36 if os.name != 'nt': # Temporary hack to get running on Windows again
37 raise util.Abort(
37 raise util.Abort(
38 _('the python curses/wcurses module is not available/installed'))
38 _('the python curses/wcurses module is not available/installed'))
39
39
40 _origstdout = sys.__stdout__ # used by gethw()
40 _origstdout = sys.__stdout__ # used by gethw()
41
41
42 class patchnode(object):
42 class patchnode(object):
43 """abstract class for patch graph nodes
43 """abstract class for patch graph nodes
44 (i.e. patchroot, header, hunk, hunkline)
44 (i.e. patchroot, header, hunk, hunkline)
45 """
45 """
46
46
47 def firstchild(self):
47 def firstchild(self):
48 raise NotImplementedError("method must be implemented by subclass")
48 raise NotImplementedError("method must be implemented by subclass")
49
49
50 def lastchild(self):
50 def lastchild(self):
51 raise NotImplementedError("method must be implemented by subclass")
51 raise NotImplementedError("method must be implemented by subclass")
52
52
53 def allchildren(self):
53 def allchildren(self):
54 "Return a list of all of the direct children of this node"
54 "Return a list of all of the direct children of this node"
55 raise NotImplementedError("method must be implemented by subclass")
55 raise NotImplementedError("method must be implemented by subclass")
56 def nextsibling(self):
56 def nextsibling(self):
57 """
57 """
58 Return the closest next item of the same type where there are no items
58 Return the closest next item of the same type where there are no items
59 of different types between the current item and this closest item.
59 of different types between the current item and this closest item.
60 If no such item exists, return None.
60 If no such item exists, return None.
61
61
62 """
62 """
63 raise NotImplementedError("method must be implemented by subclass")
63 raise NotImplementedError("method must be implemented by subclass")
64
64
65 def prevsibling(self):
65 def prevsibling(self):
66 """
66 """
67 Return the closest previous item of the same type where there are no
67 Return the closest previous item of the same type where there are no
68 items of different types between the current item and this closest item.
68 items of different types between the current item and this closest item.
69 If no such item exists, return None.
69 If no such item exists, return None.
70
70
71 """
71 """
72 raise NotImplementedError("method must be implemented by subclass")
72 raise NotImplementedError("method must be implemented by subclass")
73
73
74 def parentitem(self):
74 def parentitem(self):
75 raise NotImplementedError("method must be implemented by subclass")
75 raise NotImplementedError("method must be implemented by subclass")
76
76
77
77
78 def nextitem(self, constrainlevel=True, skipfolded=True):
78 def nextitem(self, constrainlevel=True, skipfolded=True):
79 """
79 """
80 If constrainLevel == True, return the closest next item
80 If constrainLevel == True, return the closest next item
81 of the same type where there are no items of different types between
81 of the same type where there are no items of different types between
82 the current item and this closest item.
82 the current item and this closest item.
83
83
84 If constrainLevel == False, then try to return the next item
84 If constrainLevel == False, then try to return the next item
85 closest to this item, regardless of item's type (header, hunk, or
85 closest to this item, regardless of item's type (header, hunk, or
86 HunkLine).
86 HunkLine).
87
87
88 If skipFolded == True, and the current item is folded, then the child
88 If skipFolded == True, and the current item is folded, then the child
89 items that are hidden due to folding will be skipped when determining
89 items that are hidden due to folding will be skipped when determining
90 the next item.
90 the next item.
91
91
92 If it is not possible to get the next item, return None.
92 If it is not possible to get the next item, return None.
93
93
94 """
94 """
95 try:
95 try:
96 itemfolded = self.folded
96 itemfolded = self.folded
97 except AttributeError:
97 except AttributeError:
98 itemfolded = False
98 itemfolded = False
99 if constrainlevel:
99 if constrainlevel:
100 return self.nextsibling()
100 return self.nextsibling()
101 elif skipfolded and itemfolded:
101 elif skipfolded and itemfolded:
102 nextitem = self.nextsibling()
102 nextitem = self.nextsibling()
103 if nextitem is None:
103 if nextitem is None:
104 try:
104 try:
105 nextitem = self.parentitem().nextsibling()
105 nextitem = self.parentitem().nextsibling()
106 except AttributeError:
106 except AttributeError:
107 nextitem = None
107 nextitem = None
108 return nextitem
108 return nextitem
109 else:
109 else:
110 # try child
110 # try child
111 item = self.firstchild()
111 item = self.firstchild()
112 if item is not None:
112 if item is not None:
113 return item
113 return item
114
114
115 # else try next sibling
115 # else try next sibling
116 item = self.nextsibling()
116 item = self.nextsibling()
117 if item is not None:
117 if item is not None:
118 return item
118 return item
119
119
120 try:
120 try:
121 # else try parent's next sibling
121 # else try parent's next sibling
122 item = self.parentitem().nextsibling()
122 item = self.parentitem().nextsibling()
123 if item is not None:
123 if item is not None:
124 return item
124 return item
125
125
126 # else return grandparent's next sibling (or None)
126 # else return grandparent's next sibling (or None)
127 return self.parentitem().parentitem().nextsibling()
127 return self.parentitem().parentitem().nextsibling()
128
128
129 except AttributeError: # parent and/or grandparent was None
129 except AttributeError: # parent and/or grandparent was None
130 return None
130 return None
131
131
132 def previtem(self, constrainlevel=True, skipfolded=True):
132 def previtem(self, constrainlevel=True, skipfolded=True):
133 """
133 """
134 If constrainLevel == True, return the closest previous item
134 If constrainLevel == True, return the closest previous item
135 of the same type where there are no items of different types between
135 of the same type where there are no items of different types between
136 the current item and this closest item.
136 the current item and this closest item.
137
137
138 If constrainLevel == False, then try to return the previous item
138 If constrainLevel == False, then try to return the previous item
139 closest to this item, regardless of item's type (header, hunk, or
139 closest to this item, regardless of item's type (header, hunk, or
140 HunkLine).
140 HunkLine).
141
141
142 If skipFolded == True, and the current item is folded, then the items
142 If skipFolded == True, and the current item is folded, then the items
143 that are hidden due to folding will be skipped when determining the
143 that are hidden due to folding will be skipped when determining the
144 next item.
144 next item.
145
145
146 If it is not possible to get the previous item, return None.
146 If it is not possible to get the previous item, return None.
147
147
148 """
148 """
149 if constrainlevel:
149 if constrainlevel:
150 return self.prevsibling()
150 return self.prevsibling()
151 else:
151 else:
152 # try previous sibling's last child's last child,
152 # try previous sibling's last child's last child,
153 # else try previous sibling's last child, else try previous sibling
153 # else try previous sibling's last child, else try previous sibling
154 prevsibling = self.prevsibling()
154 prevsibling = self.prevsibling()
155 if prevsibling is not None:
155 if prevsibling is not None:
156 prevsiblinglastchild = prevsibling.lastchild()
156 prevsiblinglastchild = prevsibling.lastchild()
157 if ((prevsiblinglastchild is not None) and
157 if ((prevsiblinglastchild is not None) and
158 not prevsibling.folded):
158 not prevsibling.folded):
159 prevsiblinglclc = prevsiblinglastchild.lastchild()
159 prevsiblinglclc = prevsiblinglastchild.lastchild()
160 if ((prevsiblinglclc is not None) and
160 if ((prevsiblinglclc is not None) and
161 not prevsiblinglastchild.folded):
161 not prevsiblinglastchild.folded):
162 return prevsiblinglclc
162 return prevsiblinglclc
163 else:
163 else:
164 return prevsiblinglastchild
164 return prevsiblinglastchild
165 else:
165 else:
166 return prevsibling
166 return prevsibling
167
167
168 # try parent (or None)
168 # try parent (or None)
169 return self.parentitem()
169 return self.parentitem()
170
170
171 class patch(patchnode, list): # todo: rename patchroot
171 class patch(patchnode, list): # todo: rename patchroot
172 """
172 """
173 list of header objects representing the patch.
173 list of header objects representing the patch.
174
174
175 """
175 """
176 def __init__(self, headerlist):
176 def __init__(self, headerlist):
177 self.extend(headerlist)
177 self.extend(headerlist)
178 # add parent patch object reference to each header
178 # add parent patch object reference to each header
179 for header in self:
179 for header in self:
180 header.patch = self
180 header.patch = self
181
181
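A sketch of how these nodes can be walked depth-first with the traversal helpers defined on patchnode; walkpatch is illustrative only and not part of this module:

def walkpatch(patchroot):
    # visit headers, then their hunks, then hunk lines, in order,
    # relying on nextitem(constrainlevel=False) documented above
    item = patchroot[0] if len(patchroot) else None
    while item is not None:
        yield item
        item = item.nextitem(constrainlevel=False, skipfolded=False)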
182 class uiheader(patchnode):
182 class uiheader(patchnode):
183 """patch header
183 """patch header
184
184
185 xxx shouldn't we move this to mercurial/patch.py?
185 xxx shouldn't we move this to mercurial/patch.py?
186 """
186 """
187
187
188 def __init__(self, header):
188 def __init__(self, header):
189 self.nonuiheader = header
189 self.nonuiheader = header
190 # flag to indicate whether to apply this chunk
190 # flag to indicate whether to apply this chunk
191 self.applied = True
191 self.applied = True
192 # flag which only affects the status display indicating if a node's
192 # flag which only affects the status display indicating if a node's
193 # children are partially applied (i.e. some applied, some not).
193 # children are partially applied (i.e. some applied, some not).
194 self.partial = False
194 self.partial = False
195
195
196 # flag to indicate whether to display as folded/unfolded to user
196 # flag to indicate whether to display as folded/unfolded to user
197 self.folded = True
197 self.folded = True
198
198
199 # list of all headers in patch
199 # list of all headers in patch
200 self.patch = None
200 self.patch = None
201
201
202 # flag is False if this header was ever unfolded from initial state
202 # flag is False if this header was ever unfolded from initial state
203 self.neverunfolded = True
203 self.neverunfolded = True
204 self.hunks = [uihunk(h, self) for h in self.hunks]
204 self.hunks = [uihunk(h, self) for h in self.hunks]
205
205
206
206
207 def prettystr(self):
207 def prettystr(self):
208 x = cStringIO.StringIO()
208 x = cStringIO.StringIO()
209 self.pretty(x)
209 self.pretty(x)
210 return x.getvalue()
210 return x.getvalue()
211
211
212 def nextsibling(self):
212 def nextsibling(self):
213 numheadersinpatch = len(self.patch)
213 numheadersinpatch = len(self.patch)
214 indexofthisheader = self.patch.index(self)
214 indexofthisheader = self.patch.index(self)
215
215
216 if indexofthisheader < numheadersinpatch - 1:
216 if indexofthisheader < numheadersinpatch - 1:
217 nextheader = self.patch[indexofthisheader + 1]
217 nextheader = self.patch[indexofthisheader + 1]
218 return nextheader
218 return nextheader
219 else:
219 else:
220 return None
220 return None
221
221
222 def prevsibling(self):
222 def prevsibling(self):
223 indexofthisheader = self.patch.index(self)
223 indexofthisheader = self.patch.index(self)
224 if indexofthisheader > 0:
224 if indexofthisheader > 0:
225 previousheader = self.patch[indexofthisheader - 1]
225 previousheader = self.patch[indexofthisheader - 1]
226 return previousheader
226 return previousheader
227 else:
227 else:
228 return None
228 return None
229
229
230 def parentitem(self):
230 def parentitem(self):
231 """
231 """
232 there is no 'real' parent item of a header that can be selected,
232 there is no 'real' parent item of a header that can be selected,
233 so return None.
233 so return None.
234 """
234 """
235 return None
235 return None
236
236
237 def firstchild(self):
237 def firstchild(self):
238 "return the first child of this item, if one exists. otherwise None."
238 "return the first child of this item, if one exists. otherwise None."
239 if len(self.hunks) > 0:
239 if len(self.hunks) > 0:
240 return self.hunks[0]
240 return self.hunks[0]
241 else:
241 else:
242 return None
242 return None
243
243
244 def lastchild(self):
244 def lastchild(self):
245 "return the last child of this item, if one exists. otherwise None."
245 "return the last child of this item, if one exists. otherwise None."
246 if len(self.hunks) > 0:
246 if len(self.hunks) > 0:
247 return self.hunks[-1]
247 return self.hunks[-1]
248 else:
248 else:
249 return None
249 return None
250
250
251 def allchildren(self):
251 def allchildren(self):
252 "return a list of all of the direct children of this node"
252 "return a list of all of the direct children of this node"
253 return self.hunks
253 return self.hunks
254
254
255 def __getattr__(self, name):
255 def __getattr__(self, name):
256 return getattr(self.nonuiheader, name)
256 return getattr(self.nonuiheader, name)
257
257
258 class uihunkline(patchnode):
258 class uihunkline(patchnode):
259 "represents a changed line in a hunk"
259 "represents a changed line in a hunk"
260 def __init__(self, linetext, hunk):
260 def __init__(self, linetext, hunk):
261 self.linetext = linetext
261 self.linetext = linetext
262 self.applied = True
262 self.applied = True
263 # the parent hunk to which this line belongs
263 # the parent hunk to which this line belongs
264 self.hunk = hunk
264 self.hunk = hunk
265 # folding lines is currently not used/needed, but this flag is needed
265 # folding lines is currently not used/needed, but this flag is needed
266 # in the previtem method.
266 # in the previtem method.
267 self.folded = False
267 self.folded = False
268
268
269 def prettystr(self):
269 def prettystr(self):
270 return self.linetext
270 return self.linetext
271
271
272 def nextsibling(self):
272 def nextsibling(self):
273 numlinesinhunk = len(self.hunk.changedlines)
273 numlinesinhunk = len(self.hunk.changedlines)
274 indexofthisline = self.hunk.changedlines.index(self)
274 indexofthisline = self.hunk.changedlines.index(self)
275
275
276 if (indexofthisline < numlinesinhunk - 1):
276 if (indexofthisline < numlinesinhunk - 1):
277 nextline = self.hunk.changedlines[indexofthisline + 1]
277 nextline = self.hunk.changedlines[indexofthisline + 1]
278 return nextline
278 return nextline
279 else:
279 else:
280 return None
280 return None
281
281
282 def prevsibling(self):
282 def prevsibling(self):
283 indexofthisline = self.hunk.changedlines.index(self)
283 indexofthisline = self.hunk.changedlines.index(self)
284 if indexofthisline > 0:
284 if indexofthisline > 0:
285 previousline = self.hunk.changedlines[indexofthisline - 1]
285 previousline = self.hunk.changedlines[indexofthisline - 1]
286 return previousline
286 return previousline
287 else:
287 else:
288 return None
288 return None
289
289
290 def parentitem(self):
290 def parentitem(self):
291 "return the parent to the current item"
291 "return the parent to the current item"
292 return self.hunk
292 return self.hunk
293
293
294 def firstchild(self):
294 def firstchild(self):
295 "return the first child of this item, if one exists. otherwise None."
295 "return the first child of this item, if one exists. otherwise None."
296 # hunk-lines don't have children
296 # hunk-lines don't have children
297 return None
297 return None
298
298
299 def lastchild(self):
299 def lastchild(self):
300 "return the last child of this item, if one exists. otherwise None."
300 "return the last child of this item, if one exists. otherwise None."
301 # hunk-lines don't have children
301 # hunk-lines don't have children
302 return None
302 return None
303
303
304 class uihunk(patchnode):
304 class uihunk(patchnode):
305 """ui patch hunk, wraps a hunk and keep track of ui behavior """
305 """ui patch hunk, wraps a hunk and keep track of ui behavior """
306 maxcontext = 3
306 maxcontext = 3
307
307
308 def __init__(self, hunk, header):
308 def __init__(self, hunk, header):
309 self._hunk = hunk
309 self._hunk = hunk
310 self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
310 self.changedlines = [uihunkline(line, self) for line in hunk.hunk]
311 self.header = header
311 self.header = header
312 # used at end for detecting how many removed lines were un-applied
312 # used at end for detecting how many removed lines were un-applied
313 self.originalremoved = self.removed
313 self.originalremoved = self.removed
314
314
315 # flag to indicate whether to display as folded/unfolded to user
315 # flag to indicate whether to display as folded/unfolded to user
316 self.folded = True
316 self.folded = True
317 # flag to indicate whether to apply this chunk
317 # flag to indicate whether to apply this chunk
318 self.applied = True
318 self.applied = True
319 # flag which only affects the status display indicating if a node's
319 # flag which only affects the status display indicating if a node's
320 # children are partially applied (i.e. some applied, some not).
320 # children are partially applied (i.e. some applied, some not).
321 self.partial = False
321 self.partial = False
322
322
323 def nextsibling(self):
323 def nextsibling(self):
324 numhunksinheader = len(self.header.hunks)
324 numhunksinheader = len(self.header.hunks)
325 indexofthishunk = self.header.hunks.index(self)
325 indexofthishunk = self.header.hunks.index(self)
326
326
327 if (indexofthishunk < numhunksinheader - 1):
327 if (indexofthishunk < numhunksinheader - 1):
328 nexthunk = self.header.hunks[indexofthishunk + 1]
328 nexthunk = self.header.hunks[indexofthishunk + 1]
329 return nexthunk
329 return nexthunk
330 else:
330 else:
331 return None
331 return None
332
332
333 def prevsibling(self):
333 def prevsibling(self):
334 indexofthishunk = self.header.hunks.index(self)
334 indexofthishunk = self.header.hunks.index(self)
335 if indexofthishunk > 0:
335 if indexofthishunk > 0:
336 previoushunk = self.header.hunks[indexofthishunk - 1]
336 previoushunk = self.header.hunks[indexofthishunk - 1]
337 return previoushunk
337 return previoushunk
338 else:
338 else:
339 return None
339 return None
340
340
341 def parentitem(self):
341 def parentitem(self):
342 "return the parent to the current item"
342 "return the parent to the current item"
343 return self.header
343 return self.header
344
344
345 def firstchild(self):
345 def firstchild(self):
346 "return the first child of this item, if one exists. otherwise None."
346 "return the first child of this item, if one exists. otherwise None."
347 if len(self.changedlines) > 0:
347 if len(self.changedlines) > 0:
348 return self.changedlines[0]
348 return self.changedlines[0]
349 else:
349 else:
350 return None
350 return None
351
351
352 def lastchild(self):
352 def lastchild(self):
353 "return the last child of this item, if one exists. otherwise None."
353 "return the last child of this item, if one exists. otherwise None."
354 if len(self.changedlines) > 0:
354 if len(self.changedlines) > 0:
355 return self.changedlines[-1]
355 return self.changedlines[-1]
356 else:
356 else:
357 return None
357 return None
358
358
359 def allchildren(self):
359 def allchildren(self):
360 "return a list of all of the direct children of this node"
360 "return a list of all of the direct children of this node"
361 return self.changedlines
361 return self.changedlines
362 def countchanges(self):
362 def countchanges(self):
363 """changedlines -> (n+,n-)"""
363 """changedlines -> (n+,n-)"""
364 add = len([l for l in self.changedlines if l.applied
364 add = len([l for l in self.changedlines if l.applied
365 and l.prettystr()[0] == '+'])
365 and l.prettystr()[0] == '+'])
366 rem = len([l for l in self.changedlines if l.applied
366 rem = len([l for l in self.changedlines if l.applied
367 and l.prettystr()[0] == '-'])
367 and l.prettystr()[0] == '-'])
368 return add, rem
368 return add, rem
369
369
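A minimal, self-contained sketch of the same counting rule, using plain (applied, text) tuples in place of uihunkline objects (the data is made up):

    changed = [(True, '+new line\n'), (True, '-old line\n'), (False, '-kept line\n')]
    add = len([t for ok, t in changed if ok and t[0] == '+'])  # 1
    rem = len([t for ok, t in changed if ok and t[0] == '-'])  # 1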
370 def getfromtoline(self):
370 def getfromtoline(self):
371 # calculate the number of removed lines converted to context lines
371 # calculate the number of removed lines converted to context lines
372 removedconvertedtocontext = self.originalremoved - self.removed
372 removedconvertedtocontext = self.originalremoved - self.removed
373
373
374 contextlen = (len(self.before) + len(self.after) +
374 contextlen = (len(self.before) + len(self.after) +
375 removedconvertedtocontext)
375 removedconvertedtocontext)
376 if self.after and self.after[-1] == '\\ no newline at end of file\n':
376 if self.after and self.after[-1] == '\\ no newline at end of file\n':
377 contextlen -= 1
377 contextlen -= 1
378 fromlen = contextlen + self.removed
378 fromlen = contextlen + self.removed
379 tolen = contextlen + self.added
379 tolen = contextlen + self.added
380
380
381 # diffutils manual, section "2.2.2.2 detailed description of unified
381 # diffutils manual, section "2.2.2.2 detailed description of unified
382 # format": "an empty hunk is considered to end at the line that
382 # format": "an empty hunk is considered to end at the line that
383 # precedes the hunk."
383 # precedes the hunk."
384 #
384 #
385 # so, if either of hunks is empty, decrease its line start. --immerrr
385 # so, if either of hunks is empty, decrease its line start. --immerrr
386 # but only do this if fromline > 0, to avoid having, e.g., fromline=-1.
386 # but only do this if fromline > 0, to avoid having, e.g., fromline=-1.
387 fromline, toline = self.fromline, self.toline
387 fromline, toline = self.fromline, self.toline
388 if fromline != 0:
388 if fromline != 0:
389 if fromlen == 0:
389 if fromlen == 0:
390 fromline -= 1
390 fromline -= 1
391 if tolen == 0:
391 if tolen == 0:
392 toline -= 1
392 toline -= 1
393
393
394 fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
394 fromtoline = '@@ -%d,%d +%d,%d @@%s\n' % (
395 fromline, fromlen, toline, tolen,
395 fromline, fromlen, toline, tolen,
396 self.proc and (' ' + self.proc))
396 self.proc and (' ' + self.proc))
397 return fromtoline
397 return fromtoline
398
398
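A standalone rerun of the arithmetic above with made-up numbers: a hunk starting at line 10 with three context lines on each side, two removals and two additions, where the user keeps only one of the removals applied:

    before, after = 3, 3
    originalremoved, removed, added = 2, 1, 2
    removedconvertedtocontext = originalremoved - removed    # 1 unapplied removal
    contextlen = before + after + removedconvertedtocontext  # 7
    fromlen = contextlen + removed                           # 8
    tolen = contextlen + added                               # 9
    fromline = toline = 10
    print('@@ -%d,%d +%d,%d @@' % (fromline, fromlen, toline, tolen))
    # -> @@ -10,8 +10,9 @@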
399 def write(self, fp):
399 def write(self, fp):
400 # update self.added/removed, which are used by getfromtoline()
400 # update self.added/removed, which are used by getfromtoline()
401 self.added, self.removed = self.countchanges()
401 self.added, self.removed = self.countchanges()
402 fp.write(self.getfromtoline())
402 fp.write(self.getfromtoline())
403
403
404 hunklinelist = []
404 hunklinelist = []
405 # add the following to the list: (1) all applied lines, and
405 # add the following to the list: (1) all applied lines, and
406 # (2) all unapplied removal lines (convert these to context lines)
406 # (2) all unapplied removal lines (convert these to context lines)
407 for changedline in self.changedlines:
407 for changedline in self.changedlines:
408 changedlinestr = changedline.prettystr()
408 changedlinestr = changedline.prettystr()
409 if changedline.applied:
409 if changedline.applied:
410 hunklinelist.append(changedlinestr)
410 hunklinelist.append(changedlinestr)
411 elif changedlinestr[0] == "-":
411 elif changedlinestr[0] == "-":
412 hunklinelist.append(" " + changedlinestr[1:])
412 hunklinelist.append(" " + changedlinestr[1:])
413
413
414 fp.write(''.join(self.before + hunklinelist + self.after))
414 fp.write(''.join(self.before + hunklinelist + self.after))
415
415
416 pretty = write
416 pretty = write
417
417
418 def prettystr(self):
418 def prettystr(self):
419 x = cStringIO.StringIO()
419 x = cStringIO.StringIO()
420 self.pretty(x)
420 self.pretty(x)
421 return x.getvalue()
421 return x.getvalue()
422
422
423 def __getattr__(self, name):
423 def __getattr__(self, name):
424 return getattr(self._hunk, name)
424 return getattr(self._hunk, name)
425 def __repr__(self):
425 def __repr__(self):
426 return '<hunk %r@%d>' % (self.filename(), self.fromline)
426 return '<hunk %r@%d>' % (self.filename(), self.fromline)
427
427
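A small sketch of the filtering rule in write() above: applied lines are emitted as-is, un-applied removals are demoted to context lines, and un-applied additions are dropped entirely (sample data only):

    selected = [(True, '+added and kept\n'),
                (False, '-removal the user skipped\n'),
                (False, '+addition the user skipped\n')]
    out = []
    for ok, text in selected:
        if ok:
            out.append(text)
        elif text[0] == '-':
            out.append(' ' + text[1:])   # keep the old line as context
    # out == ['+added and kept\n', ' removal the user skipped\n']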
428 def filterpatch(ui, chunks, chunkselector):
428 def filterpatch(ui, chunks, chunkselector, operation=None):
429 """interactively filter patch chunks into applied-only chunks"""
429 """interactively filter patch chunks into applied-only chunks"""
430
430
431 chunks = list(chunks)
431 chunks = list(chunks)
432 # convert chunks list into structure suitable for displaying/modifying
432 # convert chunks list into structure suitable for displaying/modifying
433 # with curses. create a list of headers only.
433 # with curses. create a list of headers only.
434 headers = [c for c in chunks if isinstance(c, patchmod.header)]
434 headers = [c for c in chunks if isinstance(c, patchmod.header)]
435
435
436 # if there are no changed files
436 # if there are no changed files
437 if len(headers) == 0:
437 if len(headers) == 0:
438 return []
438 return []
439 uiheaders = [uiheader(h) for h in headers]
439 uiheaders = [uiheader(h) for h in headers]
440 # let user choose headers/hunks/lines, and mark their applied flags
440 # let user choose headers/hunks/lines, and mark their applied flags
441 # accordingly
441 # accordingly
442 chunkselector(ui, uiheaders)
442 chunkselector(ui, uiheaders)
443 appliedhunklist = []
443 appliedhunklist = []
444 for hdr in uiheaders:
444 for hdr in uiheaders:
445 if (hdr.applied and
445 if (hdr.applied and
446 (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
446 (hdr.special() or len([h for h in hdr.hunks if h.applied]) > 0)):
447 appliedhunklist.append(hdr)
447 appliedhunklist.append(hdr)
448 fixoffset = 0
448 fixoffset = 0
449 for hnk in hdr.hunks:
449 for hnk in hdr.hunks:
450 if hnk.applied:
450 if hnk.applied:
451 appliedhunklist.append(hnk)
451 appliedhunklist.append(hnk)
452 # adjust the 'to'-line offset of the hunk to be correct
452 # adjust the 'to'-line offset of the hunk to be correct
453 # after de-activating some of the other hunks for this file
453 # after de-activating some of the other hunks for this file
454 if fixoffset:
454 if fixoffset:
455 #hnk = copy.copy(hnk) # necessary??
455 #hnk = copy.copy(hnk) # necessary??
456 hnk.toline += fixoffset
456 hnk.toline += fixoffset
457 else:
457 else:
458 fixoffset += hnk.removed - hnk.added
458 fixoffset += hnk.removed - hnk.added
459
459
460 return appliedhunklist
460 return appliedhunklist
461
461
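A hedged sketch of how a caller is expected to drive this: the chunk list comes from the patch parser, the selector callable mutates the .applied flags in place, and the new operation keyword is accepted so callers can customize the recording UI (per this changeset's description); it is unused in the body shown here. ui and chunks are placeholders, not values from this diff:

    # either the curses selector or the scripted test selector defined below
    selector = chunkselector            # or testdecorator(testfn, testchunkselector)
    applied = filterpatch(ui, chunks, selector, operation=None)
    # 'applied' now holds only the headers and hunks the user left selected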
462 def gethw():
462 def gethw():
463 """
463 """
464 magically get the current height and width of the window (without initscr)
464 magically get the current height and width of the window (without initscr)
465
465
466 this is a rip-off of a rip-off - taken from the bpython code. it is
466 this is a rip-off of a rip-off - taken from the bpython code. it is
467 useful / necessary because otherwise curses.initscr() must be called,
467 useful / necessary because otherwise curses.initscr() must be called,
468 which can leave the terminal in a nasty state after exiting.
468 which can leave the terminal in a nasty state after exiting.
469
469
470 """
470 """
471 h, w = struct.unpack(
471 h, w = struct.unpack(
472 "hhhh", fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8))[0:2]
472 "hhhh", fcntl.ioctl(_origstdout, termios.TIOCGWINSZ, "\000"*8))[0:2]
473 return h, w
473 return h, w
474
474
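For reference, the same trick as a self-contained snippet (Python 2 idiom, to match the surrounding code); using sys.stdout instead of the module's _origstdout is an assumption for illustration:

    import fcntl, struct, sys, termios

    def termsize():
        # ask the kernel for the window size without touching curses
        raw = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, "\000" * 8)
        h, w = struct.unpack("hhhh", raw)[0:2]
        return h, w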
475 def chunkselector(ui, headerlist):
475 def chunkselector(ui, headerlist):
476 """
476 """
477 curses interface to get selection of chunks, and mark the applied flags
477 curses interface to get selection of chunks, and mark the applied flags
478 of the chosen chunks.
478 of the chosen chunks.
479
479
480 """
480 """
481 ui.write(_('starting interactive selection\n'))
481 ui.write(_('starting interactive selection\n'))
482 chunkselector = curseschunkselector(headerlist, ui)
482 chunkselector = curseschunkselector(headerlist, ui)
483 curses.wrapper(chunkselector.main)
483 curses.wrapper(chunkselector.main)
484
484
485 def testdecorator(testfn, f):
485 def testdecorator(testfn, f):
486 def u(*args, **kwargs):
486 def u(*args, **kwargs):
487 return f(testfn, *args, **kwargs)
487 return f(testfn, *args, **kwargs)
488 return u
488 return u
489
489
490 def testchunkselector(testfn, ui, headerlist):
490 def testchunkselector(testfn, ui, headerlist):
491 """
491 """
492 test interface to get selection of chunks, and mark the applied flags
492 test interface to get selection of chunks, and mark the applied flags
493 of the chosen chunks.
493 of the chosen chunks.
494
494
495 """
495 """
496 chunkselector = curseschunkselector(headerlist, ui)
496 chunkselector = curseschunkselector(headerlist, ui)
497 if testfn and os.path.exists(testfn):
497 if testfn and os.path.exists(testfn):
498 testf = open(testfn)
498 testf = open(testfn)
499 testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
499 testcommands = map(lambda x: x.rstrip('\n'), testf.readlines())
500 testf.close()
500 testf.close()
501 while True:
501 while True:
502 if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
502 if chunkselector.handlekeypressed(testcommands.pop(0), test=True):
503 break
503 break
504
504
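A sketch of how the two helpers above fit together: testdecorator() binds a script path onto testchunkselector(), yielding a callable with the same (ui, headerlist) signature as the interactive chunkselector() (the path is hypothetical):

    selector = testdecorator('/tmp/crecord-script', testchunkselector)
    # selector(ui, uiheaders) then replays one keypress per line of the script
    # through handlekeypressed() until it signals completion.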
505 class curseschunkselector(object):
505 class curseschunkselector(object):
506 def __init__(self, headerlist, ui):
506 def __init__(self, headerlist, ui):
507 # put the headers into a patch object
507 # put the headers into a patch object
508 self.headerlist = patch(headerlist)
508 self.headerlist = patch(headerlist)
509
509
510 self.ui = ui
510 self.ui = ui
511
511
512 # list of all chunks
512 # list of all chunks
513 self.chunklist = []
513 self.chunklist = []
514 for h in headerlist:
514 for h in headerlist:
515 self.chunklist.append(h)
515 self.chunklist.append(h)
516 self.chunklist.extend(h.hunks)
516 self.chunklist.extend(h.hunks)
517
517
518 # dictionary mapping (fgcolor, bgcolor) pairs to the
518 # dictionary mapping (fgcolor, bgcolor) pairs to the
519 # corresponding curses color-pair value.
519 # corresponding curses color-pair value.
520 self.colorpairs = {}
520 self.colorpairs = {}
521 # maps custom nicknames of color-pairs to curses color-pair values
521 # maps custom nicknames of color-pairs to curses color-pair values
522 self.colorpairnames = {}
522 self.colorpairnames = {}
523
523
524 # the currently selected header, hunk, or hunk-line
524 # the currently selected header, hunk, or hunk-line
525 self.currentselecteditem = self.headerlist[0]
525 self.currentselecteditem = self.headerlist[0]
526
526
527 # updated when printing out patch-display -- the 'lines' here are the
527 # updated when printing out patch-display -- the 'lines' here are the
528 # line positions *in the pad*, not on the screen.
528 # line positions *in the pad*, not on the screen.
529 self.selecteditemstartline = 0
529 self.selecteditemstartline = 0
530 self.selecteditemendline = None
530 self.selecteditemendline = None
531
531
532 # define indentation levels
532 # define indentation levels
533 self.headerindentnumchars = 0
533 self.headerindentnumchars = 0
534 self.hunkindentnumchars = 3
534 self.hunkindentnumchars = 3
535 self.hunklineindentnumchars = 6
535 self.hunklineindentnumchars = 6
536
536
537 # the first line of the pad to print to the screen
537 # the first line of the pad to print to the screen
538 self.firstlineofpadtoprint = 0
538 self.firstlineofpadtoprint = 0
539
539
540 # keeps track of the number of lines in the pad
540 # keeps track of the number of lines in the pad
541 self.numpadlines = None
541 self.numpadlines = None
542
542
543 self.numstatuslines = 2
543 self.numstatuslines = 2
544
544
545 # keep a running count of the number of lines printed to the pad
545 # keep a running count of the number of lines printed to the pad
546 # (used for determining when the selected item begins/ends)
546 # (used for determining when the selected item begins/ends)
547 self.linesprintedtopadsofar = 0
547 self.linesprintedtopadsofar = 0
548
548
549 # the first line of the pad which is visible on the screen
549 # the first line of the pad which is visible on the screen
550 self.firstlineofpadtoprint = 0
550 self.firstlineofpadtoprint = 0
551
551
552 # stores optional text for a commit comment provided by the user
552 # stores optional text for a commit comment provided by the user
553 self.commenttext = ""
553 self.commenttext = ""
554
554
555 # if the last 'toggle all' command caused all changes to be applied
555 # if the last 'toggle all' command caused all changes to be applied
556 self.waslasttoggleallapplied = True
556 self.waslasttoggleallapplied = True
557
557
558 def uparrowevent(self):
558 def uparrowevent(self):
559 """
559 """
560 try to select the previous item to the current item that has the
560 try to select the previous item to the current item that has the
561 most-indented level. for example, if a hunk is selected, try to select
561 most-indented level. for example, if a hunk is selected, try to select
562 the last hunkline of the hunk prior to the selected hunk. or, if
562 the last hunkline of the hunk prior to the selected hunk. or, if
563 the first hunkline of a hunk is currently selected, then select the
563 the first hunkline of a hunk is currently selected, then select the
564 hunk itself.
564 hunk itself.
565
565
566 if the currently selected item is already at the top of the screen,
566 if the currently selected item is already at the top of the screen,
567 scroll the screen down to show the new-selected item.
567 scroll the screen down to show the new-selected item.
568
568
569 """
569 """
570 currentitem = self.currentselecteditem
570 currentitem = self.currentselecteditem
571
571
572 nextitem = currentitem.previtem(constrainlevel=False)
572 nextitem = currentitem.previtem(constrainlevel=False)
573
573
574 if nextitem is None:
574 if nextitem is None:
575 # if no parent item (i.e. currentitem is the first header), then
575 # if no parent item (i.e. currentitem is the first header), then
576 # no change...
576 # no change...
577 nextitem = currentitem
577 nextitem = currentitem
578
578
579 self.currentselecteditem = nextitem
579 self.currentselecteditem = nextitem
580
580
581 def uparrowshiftevent(self):
581 def uparrowshiftevent(self):
582 """
582 """
583 select (if possible) the previous item on the same level as the
583 select (if possible) the previous item on the same level as the
584 currently selected item. otherwise, select (if possible) the
584 currently selected item. otherwise, select (if possible) the
585 parent-item of the currently selected item.
585 parent-item of the currently selected item.
586
586
587 if the currently selected item is already at the top of the screen,
587 if the currently selected item is already at the top of the screen,
588 scroll the screen down to show the new-selected item.
588 scroll the screen down to show the new-selected item.
589
589
590 """
590 """
591 currentitem = self.currentselecteditem
591 currentitem = self.currentselecteditem
592 nextitem = currentitem.previtem()
592 nextitem = currentitem.previtem()
593 # if there's no previous item on this level, try choosing the parent
593 # if there's no previous item on this level, try choosing the parent
594 if nextitem is None:
594 if nextitem is None:
595 nextitem = currentitem.parentitem()
595 nextitem = currentitem.parentitem()
596 if nextitem is None:
596 if nextitem is None:
597 # if no parent item (i.e. currentitem is the first header), then
597 # if no parent item (i.e. currentitem is the first header), then
598 # no change...
598 # no change...
599 nextitem = currentitem
599 nextitem = currentitem
600
600
601 self.currentselecteditem = nextitem
601 self.currentselecteditem = nextitem
602
602
603 def downarrowevent(self):
603 def downarrowevent(self):
604 """
604 """
605 try to select the next item to the current item that has the
605 try to select the next item to the current item that has the
606 most-indented level. for example, if a hunk is selected, select
606 most-indented level. for example, if a hunk is selected, select
607 the first hunkline of the selected hunk. or, if the last hunkline of
607 the first hunkline of the selected hunk. or, if the last hunkline of
608 a hunk is currently selected, then select the next hunk, if one exists,
608 a hunk is currently selected, then select the next hunk, if one exists,
609 or if not, the next header if one exists.
609 or if not, the next header if one exists.
610
610
611 if the currently selected item is already at the bottom of the screen,
611 if the currently selected item is already at the bottom of the screen,
612 scroll the screen up to show the new-selected item.
612 scroll the screen up to show the new-selected item.
613
613
614 """
614 """
615 #self.startprintline += 1 #debug
615 #self.startprintline += 1 #debug
616 currentitem = self.currentselecteditem
616 currentitem = self.currentselecteditem
617
617
618 nextitem = currentitem.nextitem(constrainlevel=False)
618 nextitem = currentitem.nextitem(constrainlevel=False)
619 # if there's no next item, keep the selection as-is
619 # if there's no next item, keep the selection as-is
620 if nextitem is None:
620 if nextitem is None:
621 nextitem = currentitem
621 nextitem = currentitem
622
622
623 self.currentselecteditem = nextitem
623 self.currentselecteditem = nextitem
624
624
625 def downarrowshiftevent(self):
625 def downarrowshiftevent(self):
626 """
626 """
627 if the cursor is already at the bottom chunk, scroll the screen up and
627 if the cursor is already at the bottom chunk, scroll the screen up and
628 move the cursor-position to the subsequent chunk. otherwise, only move
628 move the cursor-position to the subsequent chunk. otherwise, only move
629 the cursor position down one chunk.
629 the cursor position down one chunk.
630
630
631 """
631 """
632 # todo: update docstring
632 # todo: update docstring
633
633
634 currentitem = self.currentselecteditem
634 currentitem = self.currentselecteditem
635 nextitem = currentitem.nextitem()
635 nextitem = currentitem.nextitem()
636 # if there's no next item on this level, try choosing the parent's
636 # if there's no next item on this level, try choosing the parent's
637 # nextitem.
637 # nextitem.
638 if nextitem is None:
638 if nextitem is None:
639 try:
639 try:
640 nextitem = currentitem.parentitem().nextitem()
640 nextitem = currentitem.parentitem().nextitem()
641 except AttributeError:
641 except AttributeError:
642 # parentitem returned None, so nextitem() can't be called
642 # parentitem returned None, so nextitem() can't be called
643 nextitem = None
643 nextitem = None
644 if nextitem is None:
644 if nextitem is None:
645 # if no next item on parent-level, then no change...
645 # if no next item on parent-level, then no change...
646 nextitem = currentitem
646 nextitem = currentitem
647
647
648 self.currentselecteditem = nextitem
648 self.currentselecteditem = nextitem
649
649
650 def rightarrowevent(self):
650 def rightarrowevent(self):
651 """
651 """
652 select (if possible) the first of this item's child-items.
652 select (if possible) the first of this item's child-items.
653
653
654 """
654 """
655 currentitem = self.currentselecteditem
655 currentitem = self.currentselecteditem
656 nextitem = currentitem.firstchild()
656 nextitem = currentitem.firstchild()
657
657
658 # turn off folding if we want to show a child-item
658 # turn off folding if we want to show a child-item
659 if currentitem.folded:
659 if currentitem.folded:
660 self.togglefolded(currentitem)
660 self.togglefolded(currentitem)
661
661
662 if nextitem is None:
662 if nextitem is None:
663 # if the item has no children, then no change...
663 # if the item has no children, then no change...
664 nextitem = currentitem
664 nextitem = currentitem
665
665
666 self.currentselecteditem = nextitem
666 self.currentselecteditem = nextitem
667
667
668 def leftarrowevent(self):
668 def leftarrowevent(self):
669 """
669 """
670 if the current item can be folded (i.e. it is an unfolded header or
670 if the current item can be folded (i.e. it is an unfolded header or
671 hunk), then fold it. otherwise try select (if possible) the parent
671 hunk), then fold it. otherwise try select (if possible) the parent
672 of this item.
672 of this item.
673
673
674 """
674 """
675 currentitem = self.currentselecteditem
675 currentitem = self.currentselecteditem
676
676
677 # try to fold the item
677 # try to fold the item
678 if not isinstance(currentitem, uihunkline):
678 if not isinstance(currentitem, uihunkline):
679 if not currentitem.folded:
679 if not currentitem.folded:
680 self.togglefolded(item=currentitem)
680 self.togglefolded(item=currentitem)
681 return
681 return
682
682
683 # if it can't be folded, try to select the parent item
683 # if it can't be folded, try to select the parent item
684 nextitem = currentitem.parentitem()
684 nextitem = currentitem.parentitem()
685
685
686 if nextitem is None:
686 if nextitem is None:
687 # if no item on parent-level, then no change...
687 # if no item on parent-level, then no change...
688 nextitem = currentitem
688 nextitem = currentitem
689 if not nextitem.folded:
689 if not nextitem.folded:
690 self.togglefolded(item=nextitem)
690 self.togglefolded(item=nextitem)
691
691
692 self.currentselecteditem = nextitem
692 self.currentselecteditem = nextitem
693
693
694 def leftarrowshiftevent(self):
694 def leftarrowshiftevent(self):
695 """
695 """
696 select the header of the current item (or fold current item if the
696 select the header of the current item (or fold current item if the
697 current item is already a header).
697 current item is already a header).
698
698
699 """
699 """
700 currentitem = self.currentselecteditem
700 currentitem = self.currentselecteditem
701
701
702 if isinstance(currentitem, uiheader):
702 if isinstance(currentitem, uiheader):
703 if not currentitem.folded:
703 if not currentitem.folded:
704 self.togglefolded(item=currentitem)
704 self.togglefolded(item=currentitem)
705 return
705 return
706
706
707 # select the parent item recursively until we're at a header
707 # select the parent item recursively until we're at a header
708 while True:
708 while True:
709 nextitem = currentitem.parentitem()
709 nextitem = currentitem.parentitem()
710 if nextitem is None:
710 if nextitem is None:
711 break
711 break
712 else:
712 else:
713 currentitem = nextitem
713 currentitem = nextitem
714
714
715 self.currentselecteditem = currentitem
715 self.currentselecteditem = currentitem
716
716
717 def updatescroll(self):
717 def updatescroll(self):
718 "scroll the screen to fully show the currently-selected"
718 "scroll the screen to fully show the currently-selected"
719 selstart = self.selecteditemstartline
719 selstart = self.selecteditemstartline
720 selend = self.selecteditemendline
720 selend = self.selecteditemendline
721 #selnumlines = selend - selstart
721 #selnumlines = selend - selstart
722 padstart = self.firstlineofpadtoprint
722 padstart = self.firstlineofpadtoprint
723 padend = padstart + self.yscreensize - self.numstatuslines - 1
723 padend = padstart + self.yscreensize - self.numstatuslines - 1
724 # 'buffered' pad start/end values which scroll with a certain
724 # 'buffered' pad start/end values which scroll with a certain
725 # top/bottom context margin
725 # top/bottom context margin
726 padstartbuffered = padstart + 3
726 padstartbuffered = padstart + 3
727 padendbuffered = padend - 3
727 padendbuffered = padend - 3
728
728
729 if selend > padendbuffered:
729 if selend > padendbuffered:
730 self.scrolllines(selend - padendbuffered)
730 self.scrolllines(selend - padendbuffered)
731 elif selstart < padstartbuffered:
731 elif selstart < padstartbuffered:
732 # negative values scroll in pgup direction
732 # negative values scroll in pgup direction
733 self.scrolllines(selstart - padstartbuffered)
733 self.scrolllines(selstart - padstartbuffered)
734
734
735
735
736 def scrolllines(self, numlines):
736 def scrolllines(self, numlines):
737 "scroll the screen up (down) by numlines when numlines >0 (<0)."
737 "scroll the screen up (down) by numlines when numlines >0 (<0)."
738 self.firstlineofpadtoprint += numlines
738 self.firstlineofpadtoprint += numlines
739 if self.firstlineofpadtoprint < 0:
739 if self.firstlineofpadtoprint < 0:
740 self.firstlineofpadtoprint = 0
740 self.firstlineofpadtoprint = 0
741 if self.firstlineofpadtoprint > self.numpadlines - 1:
741 if self.firstlineofpadtoprint > self.numpadlines - 1:
742 self.firstlineofpadtoprint = self.numpadlines - 1
742 self.firstlineofpadtoprint = self.numpadlines - 1
743
743
744 def toggleapply(self, item=None):
744 def toggleapply(self, item=None):
745 """
745 """
746 toggle the applied flag of the specified item. if no item is specified,
746 toggle the applied flag of the specified item. if no item is specified,
747 toggle the flag of the currently selected item.
747 toggle the flag of the currently selected item.
748
748
749 """
749 """
750 if item is None:
750 if item is None:
751 item = self.currentselecteditem
751 item = self.currentselecteditem
752
752
753 item.applied = not item.applied
753 item.applied = not item.applied
754
754
755 if isinstance(item, uiheader):
755 if isinstance(item, uiheader):
756 item.partial = False
756 item.partial = False
757 if item.applied:
757 if item.applied:
758 # apply all its hunks
758 # apply all its hunks
759 for hnk in item.hunks:
759 for hnk in item.hunks:
760 hnk.applied = True
760 hnk.applied = True
761 # apply all their hunklines
761 # apply all their hunklines
762 for hunkline in hnk.changedlines:
762 for hunkline in hnk.changedlines:
763 hunkline.applied = True
763 hunkline.applied = True
764 else:
764 else:
765 # un-apply all its hunks
765 # un-apply all its hunks
766 for hnk in item.hunks:
766 for hnk in item.hunks:
767 hnk.applied = False
767 hnk.applied = False
768 hnk.partial = False
768 hnk.partial = False
769 # un-apply all their hunklines
769 # un-apply all their hunklines
770 for hunkline in hnk.changedlines:
770 for hunkline in hnk.changedlines:
771 hunkline.applied = False
771 hunkline.applied = False
772 elif isinstance(item, uihunk):
772 elif isinstance(item, uihunk):
773 item.partial = False
773 item.partial = False
774 # apply all its hunklines
774 # apply all its hunklines
775 for hunkline in item.changedlines:
775 for hunkline in item.changedlines:
776 hunkline.applied = item.applied
776 hunkline.applied = item.applied
777
777
778 siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
778 siblingappliedstatus = [hnk.applied for hnk in item.header.hunks]
779 allsiblingsapplied = not (False in siblingappliedstatus)
779 allsiblingsapplied = not (False in siblingappliedstatus)
780 nosiblingsapplied = not (True in siblingappliedstatus)
780 nosiblingsapplied = not (True in siblingappliedstatus)
781
781
782 siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
782 siblingspartialstatus = [hnk.partial for hnk in item.header.hunks]
783 somesiblingspartial = (True in siblingspartialstatus)
783 somesiblingspartial = (True in siblingspartialstatus)
784
784
785 #cases where applied or partial should be removed from header
785 #cases where applied or partial should be removed from header
786
786
787 # if no 'sibling' hunks are applied (including this hunk)
787 # if no 'sibling' hunks are applied (including this hunk)
788 if nosiblingsapplied:
788 if nosiblingsapplied:
789 if not item.header.special():
789 if not item.header.special():
790 item.header.applied = False
790 item.header.applied = False
791 item.header.partial = False
791 item.header.partial = False
792 else: # some/all parent siblings are applied
792 else: # some/all parent siblings are applied
793 item.header.applied = True
793 item.header.applied = True
794 item.header.partial = (somesiblingspartial or
794 item.header.partial = (somesiblingspartial or
795 not allsiblingsapplied)
795 not allsiblingsapplied)
796
796
797 elif isinstance(item, uihunkline):
797 elif isinstance(item, uihunkline):
798 siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
798 siblingappliedstatus = [ln.applied for ln in item.hunk.changedlines]
799 allsiblingsapplied = not (False in siblingappliedstatus)
799 allsiblingsapplied = not (False in siblingappliedstatus)
800 nosiblingsapplied = not (True in siblingappliedstatus)
800 nosiblingsapplied = not (True in siblingappliedstatus)
801
801
802 # if no 'sibling' lines are applied
802 # if no 'sibling' lines are applied
803 if nosiblingsapplied:
803 if nosiblingsapplied:
804 item.hunk.applied = False
804 item.hunk.applied = False
805 item.hunk.partial = False
805 item.hunk.partial = False
806 elif allsiblingsapplied:
806 elif allsiblingsapplied:
807 item.hunk.applied = True
807 item.hunk.applied = True
808 item.hunk.partial = False
808 item.hunk.partial = False
809 else: # some siblings applied
809 else: # some siblings applied
810 item.hunk.applied = True
810 item.hunk.applied = True
811 item.hunk.partial = True
811 item.hunk.partial = True
812
812
813 parentsiblingsapplied = [hnk.applied for hnk
813 parentsiblingsapplied = [hnk.applied for hnk
814 in item.hunk.header.hunks]
814 in item.hunk.header.hunks]
815 noparentsiblingsapplied = not (True in parentsiblingsapplied)
815 noparentsiblingsapplied = not (True in parentsiblingsapplied)
816 allparentsiblingsapplied = not (False in parentsiblingsapplied)
816 allparentsiblingsapplied = not (False in parentsiblingsapplied)
817
817
818 parentsiblingspartial = [hnk.partial for hnk
818 parentsiblingspartial = [hnk.partial for hnk
819 in item.hunk.header.hunks]
819 in item.hunk.header.hunks]
820 someparentsiblingspartial = (True in parentsiblingspartial)
820 someparentsiblingspartial = (True in parentsiblingspartial)
821
821
822 # if all parent hunks are not applied, un-apply header
822 # if all parent hunks are not applied, un-apply header
823 if noparentsiblingsapplied:
823 if noparentsiblingsapplied:
824 if not item.hunk.header.special():
824 if not item.hunk.header.special():
825 item.hunk.header.applied = False
825 item.hunk.header.applied = False
826 item.hunk.header.partial = False
826 item.hunk.header.partial = False
827 # set the applied and partial status of the header if needed
827 # set the applied and partial status of the header if needed
828 else: # some/all parent siblings are applied
828 else: # some/all parent siblings are applied
829 item.hunk.header.applied = True
829 item.hunk.header.applied = True
830 item.hunk.header.partial = (someparentsiblingspartial or
830 item.hunk.header.partial = (someparentsiblingspartial or
831 not allparentsiblingsapplied)
831 not allparentsiblingsapplied)
832
832
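The header bookkeeping above boils down to a small rule; here it is as a standalone function over the (applied, partial) flags of a header's hunks, ignoring the header.special() escape hatch (a sketch, not code from the diff):

    def headerflags(hunkflags):
        """hunkflags: list of (applied, partial) pairs, one per hunk"""
        anyapplied = any(applied for applied, _ in hunkflags)
        allapplied = all(applied for applied, _ in hunkflags)
        anypartial = any(partial for _, partial in hunkflags)
        if not anyapplied:
            return (False, False)                   # header fully unapplied
        return (True, anypartial or not allapplied)

    headerflags([(True, False), (True, False)])     # (True, False)  fully applied
    headerflags([(True, True), (True, False)])      # (True, True)   partially applied
    headerflags([(False, False), (False, False)])   # (False, False)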
833 def toggleall(self):
833 def toggleall(self):
834 "toggle the applied flag of all items."
834 "toggle the applied flag of all items."
835 if self.waslasttoggleallapplied: # then unapply them this time
835 if self.waslasttoggleallapplied: # then unapply them this time
836 for item in self.headerlist:
836 for item in self.headerlist:
837 if item.applied:
837 if item.applied:
838 self.toggleapply(item)
838 self.toggleapply(item)
839 else:
839 else:
840 for item in self.headerlist:
840 for item in self.headerlist:
841 if not item.applied:
841 if not item.applied:
842 self.toggleapply(item)
842 self.toggleapply(item)
843 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
843 self.waslasttoggleallapplied = not self.waslasttoggleallapplied
844
844
845 def togglefolded(self, item=None, foldparent=False):
845 def togglefolded(self, item=None, foldparent=False):
846 "toggle folded flag of specified item (defaults to currently selected)"
846 "toggle folded flag of specified item (defaults to currently selected)"
847 if item is None:
847 if item is None:
848 item = self.currentselecteditem
848 item = self.currentselecteditem
849 if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
849 if foldparent or (isinstance(item, uiheader) and item.neverunfolded):
850 if not isinstance(item, uiheader):
850 if not isinstance(item, uiheader):
851 # we need to select the parent item in this case
851 # we need to select the parent item in this case
852 self.currentselecteditem = item = item.parentitem()
852 self.currentselecteditem = item = item.parentitem()
853 elif item.neverunfolded:
853 elif item.neverunfolded:
854 item.neverunfolded = False
854 item.neverunfolded = False
855
855
856 # also fold any foldable children of the parent/current item
856 # also fold any foldable children of the parent/current item
857 if isinstance(item, uiheader): # the original or 'new' item
857 if isinstance(item, uiheader): # the original or 'new' item
858 for child in item.allchildren():
858 for child in item.allchildren():
859 child.folded = not item.folded
859 child.folded = not item.folded
860
860
861 if isinstance(item, (uiheader, uihunk)):
861 if isinstance(item, (uiheader, uihunk)):
862 item.folded = not item.folded
862 item.folded = not item.folded
863
863
864
864
865 def alignstring(self, instr, window):
865 def alignstring(self, instr, window):
866 """
866 """
867 add whitespace to the end of a string in order to make it fill
867 add whitespace to the end of a string in order to make it fill
868 the screen in the x direction. the current cursor position is
868 the screen in the x direction. the current cursor position is
869 taken into account when making this calculation. the string can span
869 taken into account when making this calculation. the string can span
870 multiple lines.
870 multiple lines.
871
871
872 """
872 """
873 y, xstart = window.getyx()
873 y, xstart = window.getyx()
874 width = self.xscreensize
874 width = self.xscreensize
875 # turn tabs into spaces
875 # turn tabs into spaces
876 instr = instr.expandtabs(4)
876 instr = instr.expandtabs(4)
877 strwidth = encoding.colwidth(instr)
877 strwidth = encoding.colwidth(instr)
878 numspaces = (width - ((strwidth + xstart) % width) - 1)
878 numspaces = (width - ((strwidth + xstart) % width) - 1)
879 return instr + " " * numspaces + "\n"
879 return instr + " " * numspaces + "\n"
880
880
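A worked instance of the padding arithmetic above, with made-up numbers: an 80-column screen, the cursor already at column 4, and a 10-cell string:

    width, xstart, strwidth = 80, 4, 10
    numspaces = width - ((strwidth + xstart) % width) - 1   # 65
    # 4 + 10 + 65 = 79 cells are filled, leaving the last column free so
    # curses does not wrap onto the next line before the trailing "\n".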
881 def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
881 def printstring(self, window, text, fgcolor=None, bgcolor=None, pair=None,
882 pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
882 pairname=None, attrlist=None, towin=True, align=True, showwhtspc=False):
883 """
883 """
884 print the string, text, with the specified colors and attributes, to
884 print the string, text, with the specified colors and attributes, to
885 the specified curses window object.
885 the specified curses window object.
886
886
887 the foreground and background colors are of the form
887 the foreground and background colors are of the form
888 curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
888 curses.color_xxxx, where xxxx is one of: [black, blue, cyan, green,
889 magenta, red, white, yellow]. if pairname is provided, a color
889 magenta, red, white, yellow]. if pairname is provided, a color
890 pair will be looked up in the self.colorpairnames dictionary.
890 pair will be looked up in the self.colorpairnames dictionary.
891
891
892 attrlist is a list containing text attributes in the form of
892 attrlist is a list containing text attributes in the form of
893 curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
893 curses.a_xxxx, where xxxx can be: [bold, dim, normal, standout,
894 underline].
894 underline].
895
895
896 if align == True, whitespace is added to the printed string such that
896 if align == True, whitespace is added to the printed string such that
897 the string stretches to the right border of the window.
897 the string stretches to the right border of the window.
898
898
899 if showwhtspc == True, trailing whitespace of a string is highlighted.
899 if showwhtspc == True, trailing whitespace of a string is highlighted.
900
900
901 """
901 """
902 # preprocess the text, converting tabs to spaces
902 # preprocess the text, converting tabs to spaces
903 text = text.expandtabs(4)
903 text = text.expandtabs(4)
904 # strip \n, and convert control characters to ^[char] representation
904 # strip \n, and convert control characters to ^[char] representation
905 text = re.sub(r'[\x00-\x08\x0a-\x1f]',
905 text = re.sub(r'[\x00-\x08\x0a-\x1f]',
906 lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
906 lambda m:'^' + chr(ord(m.group()) + 64), text.strip('\n'))
907
907
908 if pair is not None:
908 if pair is not None:
909 colorpair = pair
909 colorpair = pair
910 elif pairname is not None:
910 elif pairname is not None:
911 colorpair = self.colorpairnames[pairname]
911 colorpair = self.colorpairnames[pairname]
912 else:
912 else:
913 if fgcolor is None:
913 if fgcolor is None:
914 fgcolor = -1
914 fgcolor = -1
915 if bgcolor is None:
915 if bgcolor is None:
916 bgcolor = -1
916 bgcolor = -1
917 if (fgcolor, bgcolor) in self.colorpairs:
917 if (fgcolor, bgcolor) in self.colorpairs:
918 colorpair = self.colorpairs[(fgcolor, bgcolor)]
918 colorpair = self.colorpairs[(fgcolor, bgcolor)]
919 else:
919 else:
920 colorpair = self.getcolorpair(fgcolor, bgcolor)
920 colorpair = self.getcolorpair(fgcolor, bgcolor)
921 # add attributes if possible
921 # add attributes if possible
922 if attrlist is None:
922 if attrlist is None:
923 attrlist = []
923 attrlist = []
924 if colorpair < 256:
924 if colorpair < 256:
925 # then it is safe to apply all attributes
925 # then it is safe to apply all attributes
926 for textattr in attrlist:
926 for textattr in attrlist:
927 colorpair |= textattr
927 colorpair |= textattr
928 else:
928 else:
929 # just apply a select few (safe?) attributes
929 # just apply a select few (safe?) attributes
930 for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
930 for textattr in (curses.A_UNDERLINE, curses.A_BOLD):
931 if textattr in attrlist:
931 if textattr in attrlist:
932 colorpair |= textattr
932 colorpair |= textattr
933
933
934 y, xstart = self.chunkpad.getyx()
934 y, xstart = self.chunkpad.getyx()
935 t = "" # variable for counting lines printed
935 t = "" # variable for counting lines printed
936 # if requested, show trailing whitespace
936 # if requested, show trailing whitespace
937 if showwhtspc:
937 if showwhtspc:
938 origlen = len(text)
938 origlen = len(text)
939 text = text.rstrip(' \n') # tabs have already been expanded
939 text = text.rstrip(' \n') # tabs have already been expanded
940 strippedlen = len(text)
940 strippedlen = len(text)
941 numtrailingspaces = origlen - strippedlen
941 numtrailingspaces = origlen - strippedlen
942
942
943 if towin:
943 if towin:
944 window.addstr(text, colorpair)
944 window.addstr(text, colorpair)
945 t += text
945 t += text
946
946
947 if showwhtspc:
947 if showwhtspc:
948 wscolorpair = colorpair | curses.A_REVERSE
948 wscolorpair = colorpair | curses.A_REVERSE
949 if towin:
949 if towin:
950 for i in range(numtrailingspaces):
950 for i in range(numtrailingspaces):
951 window.addch(curses.ACS_CKBOARD, wscolorpair)
951 window.addch(curses.ACS_CKBOARD, wscolorpair)
952 t += " " * numtrailingspaces
952 t += " " * numtrailingspaces
953
953
954 if align:
954 if align:
955 if towin:
955 if towin:
956 extrawhitespace = self.alignstring("", window)
956 extrawhitespace = self.alignstring("", window)
957 window.addstr(extrawhitespace, colorpair)
957 window.addstr(extrawhitespace, colorpair)
958 else:
958 else:
959 # need to use t, since the x position hasn't incremented
959 # need to use t, since the x position hasn't incremented
960 extrawhitespace = self.alignstring(t, window)
960 extrawhitespace = self.alignstring(t, window)
961 t += extrawhitespace
961 t += extrawhitespace
962
962
963 # is reset to 0 at the beginning of printitem()
963 # is reset to 0 at the beginning of printitem()
964
964
965 linesprinted = (xstart + len(t)) / self.xscreensize
965 linesprinted = (xstart + len(t)) / self.xscreensize
966 self.linesprintedtopadsofar += linesprinted
966 self.linesprintedtopadsofar += linesprinted
967 return t
967 return t
968
968
969 def updatescreen(self):
969 def updatescreen(self):
970 self.statuswin.erase()
970 self.statuswin.erase()
971 self.chunkpad.erase()
971 self.chunkpad.erase()
972
972
973 printstring = self.printstring
973 printstring = self.printstring
974
974
975 # print out the status lines at the top
975 # print out the status lines at the top
976 try:
976 try:
977 printstring(self.statuswin,
977 printstring(self.statuswin,
978 "SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
978 "SELECT CHUNKS: (j/k/up/dn/pgup/pgdn) move cursor; "
979 "(space/A) toggle hunk/all; (e)dit hunk;",
979 "(space/A) toggle hunk/all; (e)dit hunk;",
980 pairname="legend")
980 pairname="legend")
981 printstring(self.statuswin,
981 printstring(self.statuswin,
982 " (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
982 " (f)old/unfold; (c)onfirm applied; (q)uit; (?) help "
983 "| [X]=hunk applied **=folded",
983 "| [X]=hunk applied **=folded",
984 pairname="legend")
984 pairname="legend")
985 except curses.error:
985 except curses.error:
986 pass
986 pass
987
987
988 # print out the patch in the remaining part of the window
988 # print out the patch in the remaining part of the window
989 try:
989 try:
990 self.printitem()
990 self.printitem()
991 self.updatescroll()
991 self.updatescroll()
992 self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
992 self.chunkpad.refresh(self.firstlineofpadtoprint, 0,
993 self.numstatuslines, 0,
993 self.numstatuslines, 0,
994 self.yscreensize + 1 - self.numstatuslines,
994 self.yscreensize + 1 - self.numstatuslines,
995 self.xscreensize)
995 self.xscreensize)
996 except curses.error:
996 except curses.error:
997 pass
997 pass
998
998
999 # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
999 # refresh([pminrow, pmincol, sminrow, smincol, smaxrow, smaxcol])
1000 self.statuswin.refresh()
1000 self.statuswin.refresh()
1001
1001
1002 def getstatusprefixstring(self, item):
1002 def getstatusprefixstring(self, item):
1003 """
1003 """
1004 create a string to prefix a line with which indicates whether 'item'
1004 create a string to prefix a line with which indicates whether 'item'
1005 is applied and/or folded.
1005 is applied and/or folded.
1006
1006
1007 """
1007 """
1008 # create checkbox string
1008 # create checkbox string
1009 if item.applied:
1009 if item.applied:
1010 if not isinstance(item, uihunkline) and item.partial:
1010 if not isinstance(item, uihunkline) and item.partial:
1011 checkbox = "[~]"
1011 checkbox = "[~]"
1012 else:
1012 else:
1013 checkbox = "[x]"
1013 checkbox = "[x]"
1014 else:
1014 else:
1015 checkbox = "[ ]"
1015 checkbox = "[ ]"
1016
1016
1017 try:
1017 try:
1018 if item.folded:
1018 if item.folded:
1019 checkbox += "**"
1019 checkbox += "**"
1020 if isinstance(item, uiheader):
1020 if isinstance(item, uiheader):
1021 # one of "m", "a", or "d" (modified, added, deleted)
1021 # one of "m", "a", or "d" (modified, added, deleted)
1022 filestatus = item.changetype
1022 filestatus = item.changetype
1023
1023
1024 checkbox += filestatus + " "
1024 checkbox += filestatus + " "
1025 else:
1025 else:
1026 checkbox += " "
1026 checkbox += " "
1027 if isinstance(item, uiheader):
1027 if isinstance(item, uiheader):
1028 # add two more spaces for headers
1028 # add two more spaces for headers
1029 checkbox += " "
1029 checkbox += " "
1030 except AttributeError: # not foldable
1030 except AttributeError: # not foldable
1031 checkbox += " "
1031 checkbox += " "
1032
1032
1033 return checkbox
1033 return checkbox
1034
1034
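A stripped-down sketch of the marker logic above (spacing and the per-file change-type letter are omitted, so this is not the exact string the method builds):

    def statusprefix(applied, partial, folded):
        box = "[~]" if (applied and partial) else "[x]" if applied else "[ ]"
        return box + ("**" if folded else "")

    statusprefix(True, False, False)   # '[x]'   applied
    statusprefix(True, True, True)     # '[~]**' partially applied and folded
    statusprefix(False, False, False)  # '[ ]'   not applied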
1035 def printheader(self, header, selected=False, towin=True,
1035 def printheader(self, header, selected=False, towin=True,
1036 ignorefolding=False):
1036 ignorefolding=False):
1037 """
1037 """
1038 print the header to the pad. if towin is False, don't print anything
1038 print the header to the pad. if towin is False, don't print anything
1039 to the window; just build and return the string that would have been printed.
1039 to the window; just build and return the string that would have been printed.
1040
1040
1041 """
1041 """
1042 outstr = ""
1042 outstr = ""
1043 text = header.prettystr()
1043 text = header.prettystr()
1044 chunkindex = self.chunklist.index(header)
1044 chunkindex = self.chunklist.index(header)
1045
1045
1046 if chunkindex != 0 and not header.folded:
1046 if chunkindex != 0 and not header.folded:
1047 # add separating line before headers
1047 # add separating line before headers
1048 outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
1048 outstr += self.printstring(self.chunkpad, '_' * self.xscreensize,
1049 towin=towin, align=False)
1049 towin=towin, align=False)
1050 # select color-pair based on if the header is selected
1050 # select color-pair based on if the header is selected
1051 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1051 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1052 attrlist=[curses.A_BOLD])
1052 attrlist=[curses.A_BOLD])
1053
1053
1054 # print out each line of the chunk, expanding it to screen width
1054 # print out each line of the chunk, expanding it to screen width
1055
1055
1056 # number of characters to indent lines on this level by
1056 # number of characters to indent lines on this level by
1057 indentnumchars = 0
1057 indentnumchars = 0
1058 checkbox = self.getstatusprefixstring(header)
1058 checkbox = self.getstatusprefixstring(header)
1059 if not header.folded or ignorefolding:
1059 if not header.folded or ignorefolding:
1060 textlist = text.split("\n")
1060 textlist = text.split("\n")
1061 linestr = checkbox + textlist[0]
1061 linestr = checkbox + textlist[0]
1062 else:
1062 else:
1063 linestr = checkbox + header.filename()
1063 linestr = checkbox + header.filename()
1064 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1064 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1065 towin=towin)
1065 towin=towin)
1066 if not header.folded or ignorefolding:
1066 if not header.folded or ignorefolding:
1067 if len(textlist) > 1:
1067 if len(textlist) > 1:
1068 for line in textlist[1:]:
1068 for line in textlist[1:]:
1069 linestr = " "*(indentnumchars + len(checkbox)) + line
1069 linestr = " "*(indentnumchars + len(checkbox)) + line
1070 outstr += self.printstring(self.chunkpad, linestr,
1070 outstr += self.printstring(self.chunkpad, linestr,
1071 pair=colorpair, towin=towin)
1071 pair=colorpair, towin=towin)
1072
1072
1073 return outstr
1073 return outstr
1074
1074
1075 def printhunklinesbefore(self, hunk, selected=False, towin=True,
1075 def printhunklinesbefore(self, hunk, selected=False, towin=True,
1076 ignorefolding=False):
1076 ignorefolding=False):
1077 "includes start/end line indicator"
1077 "includes start/end line indicator"
1078 outstr = ""
1078 outstr = ""
1079 # where hunk is in list of siblings
1079 # where hunk is in list of siblings
1080 hunkindex = hunk.header.hunks.index(hunk)
1080 hunkindex = hunk.header.hunks.index(hunk)
1081
1081
1082 if hunkindex != 0:
1082 if hunkindex != 0:
1083 # add separating line before hunks
1083 # add separating line before hunks
1084 outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
1084 outstr += self.printstring(self.chunkpad, ' '*self.xscreensize,
1085 towin=towin, align=False)
1085 towin=towin, align=False)
1086
1086
1087 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1087 colorpair = self.getcolorpair(name=selected and "selected" or "normal",
1088 attrlist=[curses.A_BOLD])
1088 attrlist=[curses.A_BOLD])
1089
1089
1090 # print out from-to line with checkbox
1090 # print out from-to line with checkbox
1091 checkbox = self.getstatusprefixstring(hunk)
1091 checkbox = self.getstatusprefixstring(hunk)
1092
1092
1093 lineprefix = " "*self.hunkindentnumchars + checkbox
1093 lineprefix = " "*self.hunkindentnumchars + checkbox
1094 frtoline = " " + hunk.getfromtoline().strip("\n")
1094 frtoline = " " + hunk.getfromtoline().strip("\n")
1095
1095
1096
1096
1097 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1097 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1098 align=False) # add uncolored checkbox/indent
1098 align=False) # add uncolored checkbox/indent
1099 outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
1099 outstr += self.printstring(self.chunkpad, frtoline, pair=colorpair,
1100 towin=towin)
1100 towin=towin)
1101
1101
1102 if hunk.folded and not ignorefolding:
1102 if hunk.folded and not ignorefolding:
1103 # skip remainder of output
1103 # skip remainder of output
1104 return outstr
1104 return outstr
1105
1105
1106 # print out the lines of the chunk preceding the changed lines
1106 # print out the lines of the chunk preceding the changed lines
1107 for line in hunk.before:
1107 for line in hunk.before:
1108 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1108 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1109 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1109 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1110
1110
1111 return outstr
1111 return outstr
1112
1112
1113 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1113 def printhunklinesafter(self, hunk, towin=True, ignorefolding=False):
1114 outstr = ""
1114 outstr = ""
1115 if hunk.folded and not ignorefolding:
1115 if hunk.folded and not ignorefolding:
1116 return outstr
1116 return outstr
1117
1117
1118 # a bit superfluous, but to avoid hard-coding indent amount
1118 # a bit superfluous, but to avoid hard-coding indent amount
1119 checkbox = self.getstatusprefixstring(hunk)
1119 checkbox = self.getstatusprefixstring(hunk)
1120 for line in hunk.after:
1120 for line in hunk.after:
1121 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1121 linestr = " "*(self.hunklineindentnumchars + len(checkbox)) + line
1122 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1122 outstr += self.printstring(self.chunkpad, linestr, towin=towin)
1123
1123
1124 return outstr
1124 return outstr
1125
1125
1126 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1126 def printhunkchangedline(self, hunkline, selected=False, towin=True):
1127 outstr = ""
1127 outstr = ""
1128 checkbox = self.getstatusprefixstring(hunkline)
1128 checkbox = self.getstatusprefixstring(hunkline)
1129
1129
1130 linestr = hunkline.prettystr().strip("\n")
1130 linestr = hunkline.prettystr().strip("\n")
1131
1131
1132 # select color-pair based on whether line is an addition/removal
1132 # select color-pair based on whether line is an addition/removal
1133 if selected:
1133 if selected:
1134 colorpair = self.getcolorpair(name="selected")
1134 colorpair = self.getcolorpair(name="selected")
1135 elif linestr.startswith("+"):
1135 elif linestr.startswith("+"):
1136 colorpair = self.getcolorpair(name="addition")
1136 colorpair = self.getcolorpair(name="addition")
1137 elif linestr.startswith("-"):
1137 elif linestr.startswith("-"):
1138 colorpair = self.getcolorpair(name="deletion")
1138 colorpair = self.getcolorpair(name="deletion")
1139 elif linestr.startswith("\\"):
1139 elif linestr.startswith("\\"):
1140 colorpair = self.getcolorpair(name="normal")
1140 colorpair = self.getcolorpair(name="normal")
1141
1141
1142 lineprefix = " "*self.hunklineindentnumchars + checkbox
1142 lineprefix = " "*self.hunklineindentnumchars + checkbox
1143 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1143 outstr += self.printstring(self.chunkpad, lineprefix, towin=towin,
1144 align=False) # add uncolored checkbox/indent
1144 align=False) # add uncolored checkbox/indent
1145 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1145 outstr += self.printstring(self.chunkpad, linestr, pair=colorpair,
1146 towin=towin, showwhtspc=True)
1146 towin=towin, showwhtspc=True)
1147 return outstr
1147 return outstr
1148
1148
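# Illustrative sketch, not from crecord.py: printhunkchangedline above picks
# the color pair from the first character of the diff line -- "+" additions,
# "-" deletions, "\" no-newline markers -- and the "selected" pair when the
# line is the current selection. The same prefix-to-color mapping as a small
# standalone function:

def colorfor(linestr, selected=False):
    """map a diff line to the color-pair name used to draw it"""
    if selected:
        return "selected"
    if linestr.startswith("+"):
        return "addition"
    if linestr.startswith("-"):
        return "deletion"
    return "normal"

if __name__ == '__main__':
    assert colorfor("+new line") == "addition"
    assert colorfor("-old line") == "deletion"
    assert colorfor("-old line", selected=True) == "selected"
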
1149 def printitem(self, item=None, ignorefolding=False, recursechildren=True,
1149 def printitem(self, item=None, ignorefolding=False, recursechildren=True,
1150 towin=True):
1150 towin=True):
1151 """
1151 """
1152 use __printitem() to print the specified item.
1152 use __printitem() to print the specified item.
1153 if item is not specified, then print the entire patch.
1153 if item is not specified, then print the entire patch.
1154 (hiding folded elements, etc. -- see __printitem() docstring)
1154 (hiding folded elements, etc. -- see __printitem() docstring)
1155 """
1155 """
1156 if item is None:
1156 if item is None:
1157 item = self.headerlist
1157 item = self.headerlist
1158 if recursechildren:
1158 if recursechildren:
1159 self.linesprintedtopadsofar = 0
1159 self.linesprintedtopadsofar = 0
1160
1160
1161 outstr = []
1161 outstr = []
1162 self.__printitem(item, ignorefolding, recursechildren, outstr,
1162 self.__printitem(item, ignorefolding, recursechildren, outstr,
1163 towin=towin)
1163 towin=towin)
1164 return ''.join(outstr)
1164 return ''.join(outstr)
1165
1165
1166 def outofdisplayedarea(self):
1166 def outofdisplayedarea(self):
1167 y, _ = self.chunkpad.getyx() # cursor location
1167 y, _ = self.chunkpad.getyx() # cursor location
1168 # * 2 here works, but a tighter bound would be the max number of
1168 # * 2 here works, but a tighter bound would be the max number of
1169 # consecutive non-selectable lines,
1169 # consecutive non-selectable lines,
1170 # i.e. the max number of context lines for any hunk in the patch
1170 # i.e. the max number of context lines for any hunk in the patch
1171 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1171 miny = min(0, self.firstlineofpadtoprint - self.yscreensize)
1172 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1172 maxy = self.firstlineofpadtoprint + self.yscreensize * 2
1173 return y < miny or y > maxy
1173 return y < miny or y > maxy
1174
1174
1175 def handleselection(self, item, recursechildren):
1175 def handleselection(self, item, recursechildren):
1176 selected = (item is self.currentselecteditem)
1176 selected = (item is self.currentselecteditem)
1177 if selected and recursechildren:
1177 if selected and recursechildren:
1178 # assumes line numbering starts from line 0
1178 # assumes line numbering starts from line 0
1179 self.selecteditemstartline = self.linesprintedtopadsofar
1179 self.selecteditemstartline = self.linesprintedtopadsofar
1180 selecteditemlines = self.getnumlinesdisplayed(item,
1180 selecteditemlines = self.getnumlinesdisplayed(item,
1181 recursechildren=False)
1181 recursechildren=False)
1182 self.selecteditemendline = (self.selecteditemstartline +
1182 self.selecteditemendline = (self.selecteditemstartline +
1183 selecteditemlines - 1)
1183 selecteditemlines - 1)
1184 return selected
1184 return selected
1185
1185
1186 def __printitem(self, item, ignorefolding, recursechildren, outstr,
1186 def __printitem(self, item, ignorefolding, recursechildren, outstr,
1187 towin=True):
1187 towin=True):
1188 """
1188 """
1189 recursive method for printing out patch/header/hunk/hunk-line data to
1189 recursive method for printing out patch/header/hunk/hunk-line data to
1190 screen. also returns a string with all of the content of the displayed
1190 screen. also returns a string with all of the content of the displayed
1191 patch (not including coloring, etc.).
1191 patch (not including coloring, etc.).
1192
1192
1193 if ignorefolding is True, then folded items are printed out.
1193 if ignorefolding is True, then folded items are printed out.
1194
1194
1195 if recursechildren is False, then only print the item without its
1195 if recursechildren is False, then only print the item without its
1196 child items.
1196 child items.
1197
1197
1198 """
1198 """
1199 if towin and self.outofdisplayedarea():
1199 if towin and self.outofdisplayedarea():
1200 return
1200 return
1201
1201
1202 selected = self.handleselection(item, recursechildren)
1202 selected = self.handleselection(item, recursechildren)
1203
1203
1204 # patch object is a list of headers
1204 # patch object is a list of headers
1205 if isinstance(item, patch):
1205 if isinstance(item, patch):
1206 if recursechildren:
1206 if recursechildren:
1207 for hdr in item:
1207 for hdr in item:
1208 self.__printitem(hdr, ignorefolding,
1208 self.__printitem(hdr, ignorefolding,
1209 recursechildren, outstr, towin)
1209 recursechildren, outstr, towin)
1210 # todo: eliminate all isinstance() calls
1210 # todo: eliminate all isinstance() calls
1211 if isinstance(item, uiheader):
1211 if isinstance(item, uiheader):
1212 outstr.append(self.printheader(item, selected, towin=towin,
1212 outstr.append(self.printheader(item, selected, towin=towin,
1213 ignorefolding=ignorefolding))
1213 ignorefolding=ignorefolding))
1214 if recursechildren:
1214 if recursechildren:
1215 for hnk in item.hunks:
1215 for hnk in item.hunks:
1216 self.__printitem(hnk, ignorefolding,
1216 self.__printitem(hnk, ignorefolding,
1217 recursechildren, outstr, towin)
1217 recursechildren, outstr, towin)
1218 elif (isinstance(item, uihunk) and
1218 elif (isinstance(item, uihunk) and
1219 ((not item.header.folded) or ignorefolding)):
1219 ((not item.header.folded) or ignorefolding)):
1220 # print the hunk data which comes before the changed-lines
1220 # print the hunk data which comes before the changed-lines
1221 outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
1221 outstr.append(self.printhunklinesbefore(item, selected, towin=towin,
1222 ignorefolding=ignorefolding))
1222 ignorefolding=ignorefolding))
1223 if recursechildren:
1223 if recursechildren:
1224 for l in item.changedlines:
1224 for l in item.changedlines:
1225 self.__printitem(l, ignorefolding,
1225 self.__printitem(l, ignorefolding,
1226 recursechildren, outstr, towin)
1226 recursechildren, outstr, towin)
1227 outstr.append(self.printhunklinesafter(item, towin=towin,
1227 outstr.append(self.printhunklinesafter(item, towin=towin,
1228 ignorefolding=ignorefolding))
1228 ignorefolding=ignorefolding))
1229 elif (isinstance(item, uihunkline) and
1229 elif (isinstance(item, uihunkline) and
1230 ((not item.hunk.folded) or ignorefolding)):
1230 ((not item.hunk.folded) or ignorefolding)):
1231 outstr.append(self.printhunkchangedline(item, selected,
1231 outstr.append(self.printhunkchangedline(item, selected,
1232 towin=towin))
1232 towin=towin))
1233
1233
1234 return outstr
1234 return outstr
1235
1235
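# Illustrative sketch, not from crecord.py: __printitem above walks the patch
# tree (patch -> headers -> hunks -> changed lines), dispatching on the item
# type and appending rendered text to an accumulator list. A minimal
# standalone version of the same pattern, with hypothetical Header/Hunk
# classes standing in for the curses-backed uiheader/uihunk wrappers:

class Header(object):
    def __init__(self, filename, hunks):
        self.filename = filename
        self.hunks = hunks

class Hunk(object):
    def __init__(self, changedlines):
        self.changedlines = changedlines

def render(item, outstr):
    # a "patch" here is simply a list of headers, as in crecord
    if isinstance(item, list):
        for hdr in item:
            render(hdr, outstr)
    elif isinstance(item, Header):
        outstr.append("--- %s\n" % item.filename)
        for hunk in item.hunks:
            render(hunk, outstr)
    elif isinstance(item, Hunk):
        for line in item.changedlines:
            outstr.append(line + "\n")
    return outstr

if __name__ == '__main__':
    tree = [Header("a.txt", [Hunk(["-old line", "+new line"])])]
    print(''.join(render(tree, [])))
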
1236 def getnumlinesdisplayed(self, item=None, ignorefolding=False,
1236 def getnumlinesdisplayed(self, item=None, ignorefolding=False,
1237 recursechildren=True):
1237 recursechildren=True):
1238 """
1238 """
1239 return the number of lines which would be displayed if the item were
1239 return the number of lines which would be displayed if the item were
1240 to be printed to the display. the item will not be printed to the
1240 to be printed to the display. the item will not be printed to the
1241 display (pad).
1241 display (pad).
1242 if no item is given, assume the entire patch.
1242 if no item is given, assume the entire patch.
1243 if ignorefolding is True, folded items will be unfolded when counting
1243 if ignorefolding is True, folded items will be unfolded when counting
1244 the number of lines.
1244 the number of lines.
1245
1245
1246 """
1246 """
1247 # temporarily disable printing to windows by printstring
1247 # temporarily disable printing to windows by printstring
1248 patchdisplaystring = self.printitem(item, ignorefolding,
1248 patchdisplaystring = self.printitem(item, ignorefolding,
1249 recursechildren, towin=False)
1249 recursechildren, towin=False)
1250 numlines = len(patchdisplaystring) / self.xscreensize
1250 numlines = len(patchdisplaystring) / self.xscreensize
1251 return numlines
1251 return numlines
1252
1252
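# Illustrative sketch, not from crecord.py: getnumlinesdisplayed above counts
# pad lines by rendering the item with towin=False and dividing the length of
# the rendered string by the screen width. The sketch assumes, as printstring
# arranges, that every rendered line has been padded out to the full screen
# width:

def numlinesfor(rendered, xscreensize):
    # integer division recovers the number of full-width lines
    return len(rendered) // xscreensize

if __name__ == '__main__':
    width = 80
    rendered = "+added line".ljust(width) + "-removed line".ljust(width)
    assert numlinesfor(rendered, width) == 2
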
1253 def sigwinchhandler(self, n, frame):
1253 def sigwinchhandler(self, n, frame):
1254 "handle window resizing"
1254 "handle window resizing"
1255 try:
1255 try:
1256 curses.endwin()
1256 curses.endwin()
1257 self.yscreensize, self.xscreensize = gethw()
1257 self.yscreensize, self.xscreensize = gethw()
1258 self.statuswin.resize(self.numstatuslines, self.xscreensize)
1258 self.statuswin.resize(self.numstatuslines, self.xscreensize)
1259 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1259 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1260 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1260 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1261 # todo: try to resize commit message window if possible
1261 # todo: try to resize commit message window if possible
1262 except curses.error:
1262 except curses.error:
1263 pass
1263 pass
1264
1264
1265 def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
1265 def getcolorpair(self, fgcolor=None, bgcolor=None, name=None,
1266 attrlist=None):
1266 attrlist=None):
1267 """
1267 """
1268 get a curses color pair, adding it to self.colorpairs if it is not
1268 get a curses color pair, adding it to self.colorpairs if it is not
1269 already defined. an optional string, name, can be passed as a shortcut
1269 already defined. an optional string, name, can be passed as a shortcut
1270 for referring to the color-pair. by default, if no arguments are
1270 for referring to the color-pair. by default, if no arguments are
1271 specified, the white foreground / black background color-pair is
1271 specified, the white foreground / black background color-pair is
1272 returned.
1272 returned.
1273
1273
1274 it is expected that this function will be used exclusively for
1274 it is expected that this function will be used exclusively for
1275 initializing color pairs, and not curses.init_pair().
1275 initializing color pairs, and not curses.init_pair().
1276
1276
1277 attrlist is used to 'flavor' the returned color-pair. this information
1277 attrlist is used to 'flavor' the returned color-pair. this information
1278 is not stored in self.colorpairs. it contains attribute values like
1278 is not stored in self.colorpairs. it contains attribute values like
1279 curses.A_BOLD.
1279 curses.A_BOLD.
1280
1280
1281 """
1281 """
1282 if (name is not None) and name in self.colorpairnames:
1282 if (name is not None) and name in self.colorpairnames:
1283 # then get the associated color pair and return it
1283 # then get the associated color pair and return it
1284 colorpair = self.colorpairnames[name]
1284 colorpair = self.colorpairnames[name]
1285 else:
1285 else:
1286 if fgcolor is None:
1286 if fgcolor is None:
1287 fgcolor = -1
1287 fgcolor = -1
1288 if bgcolor is None:
1288 if bgcolor is None:
1289 bgcolor = -1
1289 bgcolor = -1
1290 if (fgcolor, bgcolor) in self.colorpairs:
1290 if (fgcolor, bgcolor) in self.colorpairs:
1291 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1291 colorpair = self.colorpairs[(fgcolor, bgcolor)]
1292 else:
1292 else:
1293 pairindex = len(self.colorpairs) + 1
1293 pairindex = len(self.colorpairs) + 1
1294 curses.init_pair(pairindex, fgcolor, bgcolor)
1294 curses.init_pair(pairindex, fgcolor, bgcolor)
1295 colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
1295 colorpair = self.colorpairs[(fgcolor, bgcolor)] = (
1296 curses.color_pair(pairindex))
1296 curses.color_pair(pairindex))
1297 if name is not None:
1297 if name is not None:
1298 self.colorpairnames[name] = curses.color_pair(pairindex)
1298 self.colorpairnames[name] = curses.color_pair(pairindex)
1299
1299
1300 # add attributes if possible
1300 # add attributes if possible
1301 if attrlist is None:
1301 if attrlist is None:
1302 attrlist = []
1302 attrlist = []
1303 if colorpair < 256:
1303 if colorpair < 256:
1304 # then it is safe to apply all attributes
1304 # then it is safe to apply all attributes
1305 for textattr in attrlist:
1305 for textattr in attrlist:
1306 colorpair |= textattr
1306 colorpair |= textattr
1307 else:
1307 else:
1308 # just apply a select few (safe?) attributes
1308 # just apply a select few (safe?) attributes
1309 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1309 for textattrib in (curses.A_UNDERLINE, curses.A_BOLD):
1310 if textattrib in attrlist:
1310 if textattrib in attrlist:
1311 colorpair |= textattrib
1311 colorpair |= textattrib
1312 return colorpair
1312 return colorpair
1313
1313
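# Illustrative sketch, not from crecord.py: getcolorpair above memoizes curses
# color pairs twice over -- by (fg, bg) tuple and, optionally, by a friendly
# name ("selected", "addition", ...) -- and only then ORs display attributes
# into the returned value. The same caching pattern, with a hypothetical
# stand-in for curses.init_pair()/color_pair() so the sketch runs without a
# terminal:

_pairs = {}        # (fg, bg) -> pair value
_pairnames = {}    # name -> pair value

def fakeinitpair(index, fg, bg):
    # stand-in for curses.init_pair() + curses.color_pair()
    return index << 8

def getpair(fg=-1, bg=-1, name=None):
    if name is not None and name in _pairnames:
        return _pairnames[name]
    if (fg, bg) not in _pairs:
        _pairs[(fg, bg)] = fakeinitpair(len(_pairs) + 1, fg, bg)
    pair = _pairs[(fg, bg)]
    if name is not None:
        _pairnames[name] = pair
    return pair

if __name__ == '__main__':
    a = getpair(2, -1, name="addition")
    assert getpair(name="addition") == a   # the name lookup hits the cache
    assert getpair(2, -1) == a             # and so does the (fg, bg) lookup
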
1314 def initcolorpair(self, *args, **kwargs):
1314 def initcolorpair(self, *args, **kwargs):
1315 "same as getcolorpair."
1315 "same as getcolorpair."
1316 self.getcolorpair(*args, **kwargs)
1316 self.getcolorpair(*args, **kwargs)
1317
1317
1318 def helpwindow(self):
1318 def helpwindow(self):
1319 "print a help window to the screen. exit after any keypress."
1319 "print a help window to the screen. exit after any keypress."
1320 helptext = """ [press any key to return to the patch-display]
1320 helptext = """ [press any key to return to the patch-display]
1321
1321
1322 crecord allows you to interactively choose among the changes you have made,
1322 crecord allows you to interactively choose among the changes you have made,
1323 and confirm only those changes you select for further processing by the command
1323 and confirm only those changes you select for further processing by the command
1324 you are running (commit/shelve/revert). after confirming the selected
1324 you are running (commit/shelve/revert). after confirming the selected
1325 changes, the unselected changes are still present in your working copy, so you
1325 changes, the unselected changes are still present in your working copy, so you
1326 can use crecord multiple times to split large changes into smaller changesets.
1326 can use crecord multiple times to split large changes into smaller changesets.
1327 the following are valid keystrokes:
1327 the following are valid keystrokes:
1328
1328
1329 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1329 [space] : (un-)select item ([~]/[x] = partly/fully applied)
1330 A : (un-)select all items
1330 A : (un-)select all items
1331 up/down-arrow [k/j] : go to previous/next unfolded item
1331 up/down-arrow [k/j] : go to previous/next unfolded item
1332 pgup/pgdn [K/J] : go to previous/next item of same type
1332 pgup/pgdn [K/J] : go to previous/next item of same type
1333 right/left-arrow [l/h] : go to child item / parent item
1333 right/left-arrow [l/h] : go to child item / parent item
1334 shift-left-arrow [H] : go to parent header / fold selected header
1334 shift-left-arrow [H] : go to parent header / fold selected header
1335 f : fold / unfold item, hiding/revealing its children
1335 f : fold / unfold item, hiding/revealing its children
1336 F : fold / unfold parent item and all of its ancestors
1336 F : fold / unfold parent item and all of its ancestors
1337 m : edit / resume editing the commit message
1337 m : edit / resume editing the commit message
1338 e : edit the currently selected hunk
1338 e : edit the currently selected hunk
1339 a : toggle amend mode (hg rev >= 2.2)
1339 a : toggle amend mode (hg rev >= 2.2)
1340 c : confirm selected changes
1340 c : confirm selected changes
1341 r : review/edit and confirm selected changes
1341 r : review/edit and confirm selected changes
1342 q : quit without confirming (no changes will be made)
1342 q : quit without confirming (no changes will be made)
1343 ? : help (what you're currently reading)"""
1343 ? : help (what you're currently reading)"""
1344
1344
1345 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1345 helpwin = curses.newwin(self.yscreensize, 0, 0, 0)
1346 helplines = helptext.split("\n")
1346 helplines = helptext.split("\n")
1347 helplines = helplines + [" "]*(
1347 helplines = helplines + [" "]*(
1348 self.yscreensize - self.numstatuslines - len(helplines) - 1)
1348 self.yscreensize - self.numstatuslines - len(helplines) - 1)
1349 try:
1349 try:
1350 for line in helplines:
1350 for line in helplines:
1351 self.printstring(helpwin, line, pairname="legend")
1351 self.printstring(helpwin, line, pairname="legend")
1352 except curses.error:
1352 except curses.error:
1353 pass
1353 pass
1354 helpwin.refresh()
1354 helpwin.refresh()
1355 try:
1355 try:
1356 helpwin.getkey()
1356 helpwin.getkey()
1357 except curses.error:
1357 except curses.error:
1358 pass
1358 pass
1359
1359
1360 def confirmationwindow(self, windowtext):
1360 def confirmationwindow(self, windowtext):
1361 "display an informational window, then wait for and return a keypress."
1361 "display an informational window, then wait for and return a keypress."
1362
1362
1363 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1363 confirmwin = curses.newwin(self.yscreensize, 0, 0, 0)
1364 try:
1364 try:
1365 lines = windowtext.split("\n")
1365 lines = windowtext.split("\n")
1366 for line in lines:
1366 for line in lines:
1367 self.printstring(confirmwin, line, pairname="selected")
1367 self.printstring(confirmwin, line, pairname="selected")
1368 except curses.error:
1368 except curses.error:
1369 pass
1369 pass
1370 self.stdscr.refresh()
1370 self.stdscr.refresh()
1371 confirmwin.refresh()
1371 confirmwin.refresh()
1372 try:
1372 try:
1373 response = chr(self.stdscr.getch())
1373 response = chr(self.stdscr.getch())
1374 except ValueError:
1374 except ValueError:
1375 response = None
1375 response = None
1376
1376
1377 return response
1377 return response
1378
1378
1379 def confirmcommit(self, review=False):
1379 def confirmcommit(self, review=False):
1380 """ask for 'y' to be pressed to confirm selected. return True if
1380 """ask for 'y' to be pressed to confirm selected. return True if
1381 confirmed."""
1381 confirmed."""
1382 if review:
1382 if review:
1383 confirmtext = (
1383 confirmtext = (
1384 """if you answer yes to the following, the your currently chosen patch chunks
1384 """if you answer yes to the following, the your currently chosen patch chunks
1385 will be loaded into an editor. you may modify the patch from the editor, and
1385 will be loaded into an editor. you may modify the patch from the editor, and
1386 save the changes if you wish to change the patch. otherwise, you can just
1386 save the changes if you wish to change the patch. otherwise, you can just
1387 close the editor without saving to accept the current patch as-is.
1387 close the editor without saving to accept the current patch as-is.
1388
1388
1389 note: don't add/remove lines unless you also modify the range information.
1389 note: don't add/remove lines unless you also modify the range information.
1390 failing to follow this rule will result in the commit aborting.
1390 failing to follow this rule will result in the commit aborting.
1391
1391
1392 are you sure you want to review/edit and confirm the selected changes [yn]?
1392 are you sure you want to review/edit and confirm the selected changes [yn]?
1393 """)
1393 """)
1394 else:
1394 else:
1395 confirmtext = (
1395 confirmtext = (
1396 "are you sure you want to confirm the selected changes [yn]? ")
1396 "are you sure you want to confirm the selected changes [yn]? ")
1397
1397
1398 response = self.confirmationwindow(confirmtext)
1398 response = self.confirmationwindow(confirmtext)
1399 if response is None:
1399 if response is None:
1400 response = "n"
1400 response = "n"
1401 if response.lower().startswith("y"):
1401 if response.lower().startswith("y"):
1402 return True
1402 return True
1403 else:
1403 else:
1404 return False
1404 return False
1405
1405
1406 def recenterdisplayedarea(self):
1406 def recenterdisplayedarea(self):
1407 """
1407 """
1408 once we have scrolled with pg up / pg down, we can be pointing outside of
1408 once we have scrolled with pg up / pg down, we can be pointing outside of
1409 the display zone. we print the patch with towin=False to compute the
1409 the display zone. we print the patch with towin=False to compute the
1410 location of the selected item even though it is outside of the displayed
1410 location of the selected item even though it is outside of the displayed
1411 zone, and then update the scroll.
1411 zone, and then update the scroll.
1412 """
1412 """
1413 self.printitem(towin=False)
1413 self.printitem(towin=False)
1414 self.updatescroll()
1414 self.updatescroll()
1415
1415
1416 def toggleedit(self, item=None, test=False):
1416 def toggleedit(self, item=None, test=False):
1417 """
1417 """
1418 edit the currently selected chunk
1418 edit the currently selected chunk
1419 """
1419 """
1420
1420
1421 def editpatchwitheditor(self, chunk):
1421 def editpatchwitheditor(self, chunk):
1422 if chunk is None:
1422 if chunk is None:
1423 self.ui.write(_('cannot edit patch for whole file'))
1423 self.ui.write(_('cannot edit patch for whole file'))
1424 self.ui.write("\n")
1424 self.ui.write("\n")
1425 return None
1425 return None
1426 if chunk.header.binary():
1426 if chunk.header.binary():
1427 self.ui.write(_('cannot edit patch for binary file'))
1427 self.ui.write(_('cannot edit patch for binary file'))
1428 self.ui.write("\n")
1428 self.ui.write("\n")
1429 return None
1429 return None
1430 # patch comment based on the git one (based on comment at end of
1430 # patch comment based on the git one (based on comment at end of
1431 # http://mercurial.selenic.com/wiki/recordextension)
1431 # http://mercurial.selenic.com/wiki/recordextension)
1432 phelp = '---' + _("""
1432 phelp = '---' + _("""
1433 to remove '-' lines, make them ' ' lines (context).
1433 to remove '-' lines, make them ' ' lines (context).
1434 to remove '+' lines, delete them.
1434 to remove '+' lines, delete them.
1435 lines starting with # will be removed from the patch.
1435 lines starting with # will be removed from the patch.
1436
1436
1437 if the patch applies cleanly, the edited hunk will immediately be
1437 if the patch applies cleanly, the edited hunk will immediately be
1438 added to the record list. if it does not apply cleanly, a rejects
1438 added to the record list. if it does not apply cleanly, a rejects
1439 file will be generated: you can use that when you try again. if
1439 file will be generated: you can use that when you try again. if
1440 all lines of the hunk are removed, then the edit is aborted and
1440 all lines of the hunk are removed, then the edit is aborted and
1441 the hunk is left unchanged.
1441 the hunk is left unchanged.
1442 """)
1442 """)
1443 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1443 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1444 suffix=".diff", text=True)
1444 suffix=".diff", text=True)
1445 ncpatchfp = None
1445 ncpatchfp = None
1446 try:
1446 try:
1447 # write the initial patch
1447 # write the initial patch
1448 f = os.fdopen(patchfd, "w")
1448 f = os.fdopen(patchfd, "w")
1449 chunk.header.write(f)
1449 chunk.header.write(f)
1450 chunk.write(f)
1450 chunk.write(f)
1451 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1451 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1452 f.close()
1452 f.close()
1453 # start the editor and wait for it to complete
1453 # start the editor and wait for it to complete
1454 editor = self.ui.geteditor()
1454 editor = self.ui.geteditor()
1455 self.ui.system("%s \"%s\"" % (editor, patchfn),
1455 self.ui.system("%s \"%s\"" % (editor, patchfn),
1456 environ={'hguser': self.ui.username()},
1456 environ={'hguser': self.ui.username()},
1457 onerr=util.Abort, errprefix=_("edit failed"))
1457 onerr=util.Abort, errprefix=_("edit failed"))
1458 # remove comment lines
1458 # remove comment lines
1459 patchfp = open(patchfn)
1459 patchfp = open(patchfn)
1460 ncpatchfp = cStringIO.StringIO()
1460 ncpatchfp = cStringIO.StringIO()
1461 for line in patchfp:
1461 for line in patchfp:
1462 if not line.startswith('#'):
1462 if not line.startswith('#'):
1463 ncpatchfp.write(line)
1463 ncpatchfp.write(line)
1464 patchfp.close()
1464 patchfp.close()
1465 ncpatchfp.seek(0)
1465 ncpatchfp.seek(0)
1466 newpatches = patchmod.parsepatch(ncpatchfp)
1466 newpatches = patchmod.parsepatch(ncpatchfp)
1467 finally:
1467 finally:
1468 os.unlink(patchfn)
1468 os.unlink(patchfn)
1469 del ncpatchfp
1469 del ncpatchfp
1470 return newpatches
1470 return newpatches
1471 if item is None:
1471 if item is None:
1472 item = self.currentselecteditem
1472 item = self.currentselecteditem
1473 if isinstance(item, uiheader):
1473 if isinstance(item, uiheader):
1474 return
1474 return
1475 if isinstance(item, uihunkline):
1475 if isinstance(item, uihunkline):
1476 item = item.parentitem()
1476 item = item.parentitem()
1477 if not isinstance(item, uihunk):
1477 if not isinstance(item, uihunk):
1478 return
1478 return
1479
1479
1480 beforeadded, beforeremoved = item.added, item.removed
1480 beforeadded, beforeremoved = item.added, item.removed
1481 newpatches = editpatchwitheditor(self, item)
1481 newpatches = editpatchwitheditor(self, item)
1482 header = item.header
1482 header = item.header
1483 editedhunkindex = header.hunks.index(item)
1483 editedhunkindex = header.hunks.index(item)
1484 hunksbefore = header.hunks[:editedhunkindex]
1484 hunksbefore = header.hunks[:editedhunkindex]
1485 hunksafter = header.hunks[editedhunkindex + 1:]
1485 hunksafter = header.hunks[editedhunkindex + 1:]
1486 newpatchheader = newpatches[0]
1486 newpatchheader = newpatches[0]
1487 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1487 newhunks = [uihunk(h, header) for h in newpatchheader.hunks]
1488 newadded = sum([h.added for h in newhunks])
1488 newadded = sum([h.added for h in newhunks])
1489 newremoved = sum([h.removed for h in newhunks])
1489 newremoved = sum([h.removed for h in newhunks])
1490 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1490 offset = (newadded - beforeadded) - (newremoved - beforeremoved)
1491
1491
1492 for h in hunksafter:
1492 for h in hunksafter:
1493 h.toline += offset
1493 h.toline += offset
1494 for h in newhunks:
1494 for h in newhunks:
1495 h.folded = False
1495 h.folded = False
1496 header.hunks = hunksbefore + newhunks + hunksafter
1496 header.hunks = hunksbefore + newhunks + hunksafter
1497 if self.emptypatch():
1497 if self.emptypatch():
1498 header.hunks = hunksbefore + [item] + hunksafter
1498 header.hunks = hunksbefore + [item] + hunksafter
1499 self.currentselecteditem = header
1499 self.currentselecteditem = header
1500
1500
1501 if not test:
1501 if not test:
1502 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1502 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1503 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1503 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1504 self.updatescroll()
1504 self.updatescroll()
1505 self.stdscr.refresh()
1505 self.stdscr.refresh()
1506 self.statuswin.refresh()
1506 self.statuswin.refresh()
1507 self.stdscr.keypad(1)
1507 self.stdscr.keypad(1)
1508
1508
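# Illustrative sketch, not from crecord.py: toggleedit above writes the
# selected hunk plus a '#'-commented help text to a temporary file, runs the
# user's editor on it, strips the comment lines and re-parses the result. The
# comment-stripping round trip on its own, without invoking an editor or
# patch.parsepatch():

import os
import tempfile

def stripcomments(path):
    with open(path) as fp:
        return [l for l in fp if not l.startswith('#')]

if __name__ == '__main__':
    fd, fn = tempfile.mkstemp(prefix="hg-editor-", suffix=".diff", text=True)
    try:
        with os.fdopen(fd, "w") as f:
            f.write("@@ -1,1 +1,1 @@\n-old\n+new\n")
            f.write("# lines starting with # will be removed from the patch\n")
        assert stripcomments(fn) == ["@@ -1,1 +1,1 @@\n", "-old\n", "+new\n"]
    finally:
        os.unlink(fn)
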
1509 def emptypatch(self):
1509 def emptypatch(self):
1510 item = self.headerlist
1510 item = self.headerlist
1511 if not item:
1511 if not item:
1512 return True
1512 return True
1513 for header in item:
1513 for header in item:
1514 if header.hunks:
1514 if header.hunks:
1515 return False
1515 return False
1516 return True
1516 return True
1517
1517
1518 def handlekeypressed(self, keypressed, test=False):
1518 def handlekeypressed(self, keypressed, test=False):
1519 if keypressed in ["k", "KEY_UP"]:
1519 if keypressed in ["k", "KEY_UP"]:
1520 self.uparrowevent()
1520 self.uparrowevent()
1521 if keypressed in ["k", "KEY_PPAGE"]:
1521 if keypressed in ["k", "KEY_PPAGE"]:
1522 self.uparrowshiftevent()
1522 self.uparrowshiftevent()
1523 elif keypressed in ["j", "KEY_DOWN"]:
1523 elif keypressed in ["j", "KEY_DOWN"]:
1524 self.downarrowevent()
1524 self.downarrowevent()
1525 elif keypressed in ["j", "KEY_NPAGE"]:
1525 elif keypressed in ["j", "KEY_NPAGE"]:
1526 self.downarrowshiftevent()
1526 self.downarrowshiftevent()
1527 elif keypressed in ["l", "KEY_RIGHT"]:
1527 elif keypressed in ["l", "KEY_RIGHT"]:
1528 self.rightarrowevent()
1528 self.rightarrowevent()
1529 elif keypressed in ["h", "KEY_LEFT"]:
1529 elif keypressed in ["h", "KEY_LEFT"]:
1530 self.leftarrowevent()
1530 self.leftarrowevent()
1531 elif keypressed in ["h", "KEY_SLEFT"]:
1531 elif keypressed in ["h", "KEY_SLEFT"]:
1532 self.leftarrowshiftevent()
1532 self.leftarrowshiftevent()
1533 elif keypressed in ["q"]:
1533 elif keypressed in ["q"]:
1534 raise util.Abort(_('user quit'))
1534 raise util.Abort(_('user quit'))
1535 elif keypressed in ["c"]:
1535 elif keypressed in ["c"]:
1536 if self.confirmcommit():
1536 if self.confirmcommit():
1537 return True
1537 return True
1538 elif keypressed in ["r"]:
1538 elif keypressed in ["r"]:
1539 if self.confirmcommit(review=True):
1539 if self.confirmcommit(review=True):
1540 return True
1540 return True
1541 elif test and keypressed in ['X']:
1541 elif test and keypressed in ['X']:
1542 return True
1542 return True
1543 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1543 elif keypressed in [' '] or (test and keypressed in ["TOGGLE"]):
1544 self.toggleapply()
1544 self.toggleapply()
1545 elif keypressed in ['A']:
1545 elif keypressed in ['A']:
1546 self.toggleall()
1546 self.toggleall()
1547 elif keypressed in ['e']:
1547 elif keypressed in ['e']:
1548 self.toggleedit(test=test)
1548 self.toggleedit(test=test)
1549 elif keypressed in ["f"]:
1549 elif keypressed in ["f"]:
1550 self.togglefolded()
1550 self.togglefolded()
1551 elif keypressed in ["f"]:
1551 elif keypressed in ["f"]:
1552 self.togglefolded(foldparent=True)
1552 self.togglefolded(foldparent=True)
1553 elif keypressed in ["?"]:
1553 elif keypressed in ["?"]:
1554 self.helpwindow()
1554 self.helpwindow()
1555
1555
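# Illustrative sketch, not from crecord.py: handlekeypressed above is a chain
# of if/elif tests on the curses key name; each binding needs a distinct key
# string, otherwise the later branch in the chain is unreachable. A
# table-driven version of the same dispatch, with hypothetical handler names:

def makedispatcher(handlers):
    def dispatch(key):
        handler = handlers.get(key)
        if handler is None:
            return False
        return bool(handler())   # truthy result means "done, leave the loop"
    return dispatch

if __name__ == '__main__':
    calls = []
    handlers = {
        "k": lambda: calls.append("up"),
        "K": lambda: calls.append("pageup"),
        "c": lambda: calls.append("confirm") or True,
    }
    dispatch = makedispatcher(handlers)
    dispatch("k")
    dispatch("K")
    assert calls == ["up", "pageup"]
    assert dispatch("c") is True
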
1556 def main(self, stdscr):
1556 def main(self, stdscr):
1557 """
1557 """
1558 method to be wrapped by curses.wrapper() for selecting chunks.
1558 method to be wrapped by curses.wrapper() for selecting chunks.
1559
1559
1560 """
1560 """
1561 signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1561 signal.signal(signal.SIGWINCH, self.sigwinchhandler)
1562 self.stdscr = stdscr
1562 self.stdscr = stdscr
1563 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1563 self.yscreensize, self.xscreensize = self.stdscr.getmaxyx()
1564
1564
1565 curses.start_color()
1565 curses.start_color()
1566 curses.use_default_colors()
1566 curses.use_default_colors()
1567
1567
1568 # available colors: black, blue, cyan, green, magenta, white, yellow
1568 # available colors: black, blue, cyan, green, magenta, white, yellow
1569 # init_pair(color_id, foreground_color, background_color)
1569 # init_pair(color_id, foreground_color, background_color)
1570 self.initcolorpair(None, None, name="normal")
1570 self.initcolorpair(None, None, name="normal")
1571 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
1571 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_MAGENTA,
1572 name="selected")
1572 name="selected")
1573 self.initcolorpair(curses.COLOR_RED, None, name="deletion")
1573 self.initcolorpair(curses.COLOR_RED, None, name="deletion")
1574 self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
1574 self.initcolorpair(curses.COLOR_GREEN, None, name="addition")
1575 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
1575 self.initcolorpair(curses.COLOR_WHITE, curses.COLOR_BLUE, name="legend")
1576 # newwin([height, width,] begin_y, begin_x)
1576 # newwin([height, width,] begin_y, begin_x)
1577 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1577 self.statuswin = curses.newwin(self.numstatuslines, 0, 0, 0)
1578 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1578 self.statuswin.keypad(1) # interpret arrow-key, etc. esc sequences
1579
1579
1580 # figure out how much space to allocate for the chunk-pad which is
1580 # figure out how much space to allocate for the chunk-pad which is
1581 # used for displaying the patch
1581 # used for displaying the patch
1582
1582
1583 # stupid hack to prevent getnumlinesdisplayed from failing
1583 # stupid hack to prevent getnumlinesdisplayed from failing
1584 self.chunkpad = curses.newpad(1, self.xscreensize)
1584 self.chunkpad = curses.newpad(1, self.xscreensize)
1585
1585
1586 # add 1 to account for the last line of text reaching the end of the line
1586 # add 1 to account for the last line of text reaching the end of the line
1587 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1587 self.numpadlines = self.getnumlinesdisplayed(ignorefolding=True) + 1
1588 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1588 self.chunkpad = curses.newpad(self.numpadlines, self.xscreensize)
1589
1589
1590 # initialize selecteditemendline (initial start-line is 0)
1590 # initialize selecteditemendline (initial start-line is 0)
1591 self.selecteditemendline = self.getnumlinesdisplayed(
1591 self.selecteditemendline = self.getnumlinesdisplayed(
1592 self.currentselecteditem, recursechildren=False)
1592 self.currentselecteditem, recursechildren=False)
1593
1593
1594 while True:
1594 while True:
1595 self.updatescreen()
1595 self.updatescreen()
1596 try:
1596 try:
1597 keypressed = self.statuswin.getkey()
1597 keypressed = self.statuswin.getkey()
1598 except curses.error:
1598 except curses.error:
1599 keypressed = "foobar"
1599 keypressed = "foobar"
1600 if self.handlekeypressed(keypressed):
1600 if self.handlekeypressed(keypressed):
1601 break
1601 break
@@ -1,2478 +1,2478 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import collections
9 import collections
10 import cStringIO, email, os, errno, re, posixpath, copy
10 import cStringIO, email, os, errno, re, posixpath, copy
11 import tempfile, zlib, shutil
11 import tempfile, zlib, shutil
12 # On python2.4 you have to import these by name or they fail to
12 # On python2.4 you have to import these by name or they fail to
13 # load. This was not a problem on Python 2.7.
13 # load. This was not a problem on Python 2.7.
14 import email.Generator
14 import email.Generator
15 import email.Parser
15 import email.Parser
16
16
17 from i18n import _
17 from i18n import _
18 from node import hex, short
18 from node import hex, short
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
20 import pathutil
20 import pathutil
21
21
22 gitre = re.compile('diff --git a/(.*) b/(.*)')
22 gitre = re.compile('diff --git a/(.*) b/(.*)')
23 tabsplitter = re.compile(r'(\t+|[^\t]+)')
23 tabsplitter = re.compile(r'(\t+|[^\t]+)')
24
24
25 class PatchError(Exception):
25 class PatchError(Exception):
26 pass
26 pass
27
27
28
28
29 # public functions
29 # public functions
30
30
31 def split(stream):
31 def split(stream):
32 '''return an iterator of individual patches from a stream'''
32 '''return an iterator of individual patches from a stream'''
33 def isheader(line, inheader):
33 def isheader(line, inheader):
34 if inheader and line[0] in (' ', '\t'):
34 if inheader and line[0] in (' ', '\t'):
35 # continuation
35 # continuation
36 return True
36 return True
37 if line[0] in (' ', '-', '+'):
37 if line[0] in (' ', '-', '+'):
38 # diff line - don't check for header pattern in there
38 # diff line - don't check for header pattern in there
39 return False
39 return False
40 l = line.split(': ', 1)
40 l = line.split(': ', 1)
41 return len(l) == 2 and ' ' not in l[0]
41 return len(l) == 2 and ' ' not in l[0]
42
42
43 def chunk(lines):
43 def chunk(lines):
44 return cStringIO.StringIO(''.join(lines))
44 return cStringIO.StringIO(''.join(lines))
45
45
46 def hgsplit(stream, cur):
46 def hgsplit(stream, cur):
47 inheader = True
47 inheader = True
48
48
49 for line in stream:
49 for line in stream:
50 if not line.strip():
50 if not line.strip():
51 inheader = False
51 inheader = False
52 if not inheader and line.startswith('# HG changeset patch'):
52 if not inheader and line.startswith('# HG changeset patch'):
53 yield chunk(cur)
53 yield chunk(cur)
54 cur = []
54 cur = []
55 inheader = True
55 inheader = True
56
56
57 cur.append(line)
57 cur.append(line)
58
58
59 if cur:
59 if cur:
60 yield chunk(cur)
60 yield chunk(cur)
61
61
62 def mboxsplit(stream, cur):
62 def mboxsplit(stream, cur):
63 for line in stream:
63 for line in stream:
64 if line.startswith('From '):
64 if line.startswith('From '):
65 for c in split(chunk(cur[1:])):
65 for c in split(chunk(cur[1:])):
66 yield c
66 yield c
67 cur = []
67 cur = []
68
68
69 cur.append(line)
69 cur.append(line)
70
70
71 if cur:
71 if cur:
72 for c in split(chunk(cur[1:])):
72 for c in split(chunk(cur[1:])):
73 yield c
73 yield c
74
74
75 def mimesplit(stream, cur):
75 def mimesplit(stream, cur):
76 def msgfp(m):
76 def msgfp(m):
77 fp = cStringIO.StringIO()
77 fp = cStringIO.StringIO()
78 g = email.Generator.Generator(fp, mangle_from_=False)
78 g = email.Generator.Generator(fp, mangle_from_=False)
79 g.flatten(m)
79 g.flatten(m)
80 fp.seek(0)
80 fp.seek(0)
81 return fp
81 return fp
82
82
83 for line in stream:
83 for line in stream:
84 cur.append(line)
84 cur.append(line)
85 c = chunk(cur)
85 c = chunk(cur)
86
86
87 m = email.Parser.Parser().parse(c)
87 m = email.Parser.Parser().parse(c)
88 if not m.is_multipart():
88 if not m.is_multipart():
89 yield msgfp(m)
89 yield msgfp(m)
90 else:
90 else:
91 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
91 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
92 for part in m.walk():
92 for part in m.walk():
93 ct = part.get_content_type()
93 ct = part.get_content_type()
94 if ct not in ok_types:
94 if ct not in ok_types:
95 continue
95 continue
96 yield msgfp(part)
96 yield msgfp(part)
97
97
98 def headersplit(stream, cur):
98 def headersplit(stream, cur):
99 inheader = False
99 inheader = False
100
100
101 for line in stream:
101 for line in stream:
102 if not inheader and isheader(line, inheader):
102 if not inheader and isheader(line, inheader):
103 yield chunk(cur)
103 yield chunk(cur)
104 cur = []
104 cur = []
105 inheader = True
105 inheader = True
106 if inheader and not isheader(line, inheader):
106 if inheader and not isheader(line, inheader):
107 inheader = False
107 inheader = False
108
108
109 cur.append(line)
109 cur.append(line)
110
110
111 if cur:
111 if cur:
112 yield chunk(cur)
112 yield chunk(cur)
113
113
114 def remainder(cur):
114 def remainder(cur):
115 yield chunk(cur)
115 yield chunk(cur)
116
116
117 class fiter(object):
117 class fiter(object):
118 def __init__(self, fp):
118 def __init__(self, fp):
119 self.fp = fp
119 self.fp = fp
120
120
121 def __iter__(self):
121 def __iter__(self):
122 return self
122 return self
123
123
124 def next(self):
124 def next(self):
125 l = self.fp.readline()
125 l = self.fp.readline()
126 if not l:
126 if not l:
127 raise StopIteration
127 raise StopIteration
128 return l
128 return l
129
129
130 inheader = False
130 inheader = False
131 cur = []
131 cur = []
132
132
133 mimeheaders = ['content-type']
133 mimeheaders = ['content-type']
134
134
135 if not util.safehasattr(stream, 'next'):
135 if not util.safehasattr(stream, 'next'):
136 # http responses, for example, have readline but not next
136 # http responses, for example, have readline but not next
137 stream = fiter(stream)
137 stream = fiter(stream)
138
138
139 for line in stream:
139 for line in stream:
140 cur.append(line)
140 cur.append(line)
141 if line.startswith('# HG changeset patch'):
141 if line.startswith('# HG changeset patch'):
142 return hgsplit(stream, cur)
142 return hgsplit(stream, cur)
143 elif line.startswith('From '):
143 elif line.startswith('From '):
144 return mboxsplit(stream, cur)
144 return mboxsplit(stream, cur)
145 elif isheader(line, inheader):
145 elif isheader(line, inheader):
146 inheader = True
146 inheader = True
147 if line.split(':', 1)[0].lower() in mimeheaders:
147 if line.split(':', 1)[0].lower() in mimeheaders:
148 # let email parser handle this
148 # let email parser handle this
149 return mimesplit(stream, cur)
149 return mimesplit(stream, cur)
150 elif line.startswith('--- ') and inheader:
150 elif line.startswith('--- ') and inheader:
151 # No evil headers seen before the diff started, split by hand
151 # No evil headers seen before the diff started, split by hand
152 return headersplit(stream, cur)
152 return headersplit(stream, cur)
153 # Not enough info, keep reading
153 # Not enough info, keep reading
154
154
155 # if we are here, we have a very plain patch
155 # if we are here, we have a very plain patch
156 return remainder(cur)
156 return remainder(cur)
157
157
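# Illustrative sketch, not from patch.py: split() above decides how to carve
# up the stream by peeking at each line -- "# HG changeset patch" means hg
# export output, "From " means an mbox, and an RFC 2822-style "Name: value"
# line means mail/quilt headers. The isheader() heuristic it relies on,
# reproduced standalone:

def isheader(line, inheader):
    if inheader and line[0] in (' ', '\t'):
        return True            # continuation of the previous header
    if line[0] in (' ', '-', '+'):
        return False           # diff content is never a header
    l = line.split(': ', 1)
    return len(l) == 2 and ' ' not in l[0]

if __name__ == '__main__':
    assert isheader('Subject: [PATCH] fix parser\n', False)
    assert isheader('\tcontinued subject line\n', True)
    assert not isheader('+added line\n', False)
    assert not isheader('plain text without a colon\n', False)
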
158 def extract(ui, fileobj):
158 def extract(ui, fileobj):
159 '''extract patch from data read from fileobj.
159 '''extract patch from data read from fileobj.
160
160
161 patch can be a normal patch or contained in an email message.
161 patch can be a normal patch or contained in an email message.
162
162
163 return tuple (filename, message, user, date, branch, node, p1, p2).
163 return tuple (filename, message, user, date, branch, node, p1, p2).
164 Any item in the returned tuple can be None. If filename is None,
164 Any item in the returned tuple can be None. If filename is None,
165 fileobj did not contain a patch. Caller must unlink filename when done.'''
165 fileobj did not contain a patch. Caller must unlink filename when done.'''
166
166
167 # attempt to detect the start of a patch
167 # attempt to detect the start of a patch
168 # (this heuristic is borrowed from quilt)
168 # (this heuristic is borrowed from quilt)
169 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
169 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
170 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
170 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
171 r'---[ \t].*?^\+\+\+[ \t]|'
171 r'---[ \t].*?^\+\+\+[ \t]|'
172 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
172 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
173
173
174 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
174 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
175 tmpfp = os.fdopen(fd, 'w')
175 tmpfp = os.fdopen(fd, 'w')
176 try:
176 try:
177 msg = email.Parser.Parser().parse(fileobj)
177 msg = email.Parser.Parser().parse(fileobj)
178
178
179 subject = msg['Subject']
179 subject = msg['Subject']
180 user = msg['From']
180 user = msg['From']
181 if not subject and not user:
181 if not subject and not user:
182 # Not an email, restore parsed headers if any
182 # Not an email, restore parsed headers if any
183 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
183 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
184
184
185 # should try to parse msg['Date']
185 # should try to parse msg['Date']
186 date = None
186 date = None
187 nodeid = None
187 nodeid = None
188 branch = None
188 branch = None
189 parents = []
189 parents = []
190
190
191 if subject:
191 if subject:
192 if subject.startswith('[PATCH'):
192 if subject.startswith('[PATCH'):
193 pend = subject.find(']')
193 pend = subject.find(']')
194 if pend >= 0:
194 if pend >= 0:
195 subject = subject[pend + 1:].lstrip()
195 subject = subject[pend + 1:].lstrip()
196 subject = re.sub(r'\n[ \t]+', ' ', subject)
196 subject = re.sub(r'\n[ \t]+', ' ', subject)
197 ui.debug('Subject: %s\n' % subject)
197 ui.debug('Subject: %s\n' % subject)
198 if user:
198 if user:
199 ui.debug('From: %s\n' % user)
199 ui.debug('From: %s\n' % user)
200 diffs_seen = 0
200 diffs_seen = 0
201 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
201 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
202 message = ''
202 message = ''
203 for part in msg.walk():
203 for part in msg.walk():
204 content_type = part.get_content_type()
204 content_type = part.get_content_type()
205 ui.debug('Content-Type: %s\n' % content_type)
205 ui.debug('Content-Type: %s\n' % content_type)
206 if content_type not in ok_types:
206 if content_type not in ok_types:
207 continue
207 continue
208 payload = part.get_payload(decode=True)
208 payload = part.get_payload(decode=True)
209 m = diffre.search(payload)
209 m = diffre.search(payload)
210 if m:
210 if m:
211 hgpatch = False
211 hgpatch = False
212 hgpatchheader = False
212 hgpatchheader = False
213 ignoretext = False
213 ignoretext = False
214
214
215 ui.debug('found patch at byte %d\n' % m.start(0))
215 ui.debug('found patch at byte %d\n' % m.start(0))
216 diffs_seen += 1
216 diffs_seen += 1
217 cfp = cStringIO.StringIO()
217 cfp = cStringIO.StringIO()
218 for line in payload[:m.start(0)].splitlines():
218 for line in payload[:m.start(0)].splitlines():
219 if line.startswith('# HG changeset patch') and not hgpatch:
219 if line.startswith('# HG changeset patch') and not hgpatch:
220 ui.debug('patch generated by hg export\n')
220 ui.debug('patch generated by hg export\n')
221 hgpatch = True
221 hgpatch = True
222 hgpatchheader = True
222 hgpatchheader = True
223 # drop earlier commit message content
223 # drop earlier commit message content
224 cfp.seek(0)
224 cfp.seek(0)
225 cfp.truncate()
225 cfp.truncate()
226 subject = None
226 subject = None
227 elif hgpatchheader:
227 elif hgpatchheader:
228 if line.startswith('# User '):
228 if line.startswith('# User '):
229 user = line[7:]
229 user = line[7:]
230 ui.debug('From: %s\n' % user)
230 ui.debug('From: %s\n' % user)
231 elif line.startswith("# Date "):
231 elif line.startswith("# Date "):
232 date = line[7:]
232 date = line[7:]
233 elif line.startswith("# Branch "):
233 elif line.startswith("# Branch "):
234 branch = line[9:]
234 branch = line[9:]
235 elif line.startswith("# Node ID "):
235 elif line.startswith("# Node ID "):
236 nodeid = line[10:]
236 nodeid = line[10:]
237 elif line.startswith("# Parent "):
237 elif line.startswith("# Parent "):
238 parents.append(line[9:].lstrip())
238 parents.append(line[9:].lstrip())
239 elif not line.startswith("# "):
239 elif not line.startswith("# "):
240 hgpatchheader = False
240 hgpatchheader = False
241 elif line == '---':
241 elif line == '---':
242 ignoretext = True
242 ignoretext = True
243 if not hgpatchheader and not ignoretext:
243 if not hgpatchheader and not ignoretext:
244 cfp.write(line)
244 cfp.write(line)
245 cfp.write('\n')
245 cfp.write('\n')
246 message = cfp.getvalue()
246 message = cfp.getvalue()
247 if tmpfp:
247 if tmpfp:
248 tmpfp.write(payload)
248 tmpfp.write(payload)
249 if not payload.endswith('\n'):
249 if not payload.endswith('\n'):
250 tmpfp.write('\n')
250 tmpfp.write('\n')
251 elif not diffs_seen and message and content_type == 'text/plain':
251 elif not diffs_seen and message and content_type == 'text/plain':
252 message += '\n' + payload
252 message += '\n' + payload
253 except: # re-raises
253 except: # re-raises
254 tmpfp.close()
254 tmpfp.close()
255 os.unlink(tmpname)
255 os.unlink(tmpname)
256 raise
256 raise
257
257
258 if subject and not message.startswith(subject):
258 if subject and not message.startswith(subject):
259 message = '%s\n%s' % (subject, message)
259 message = '%s\n%s' % (subject, message)
260 tmpfp.close()
260 tmpfp.close()
261 if not diffs_seen:
261 if not diffs_seen:
262 os.unlink(tmpname)
262 os.unlink(tmpname)
263 return None, message, user, date, branch, None, None, None
263 return None, message, user, date, branch, None, None, None
264
264
265 if parents:
265 if parents:
266 p1 = parents.pop(0)
266 p1 = parents.pop(0)
267 else:
267 else:
268 p1 = None
268 p1 = None
269
269
270 if parents:
270 if parents:
271 p2 = parents.pop(0)
271 p2 = parents.pop(0)
272 else:
272 else:
273 p2 = None
273 p2 = None
274
274
275 return tmpname, message, user, date, branch, nodeid, p1, p2
275 return tmpname, message, user, date, branch, nodeid, p1, p2
276
276
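# Illustrative sketch, not from patch.py: inside extract() above, the lines
# following "# HG changeset patch" carry the changeset metadata ("# User",
# "# Date", "# Branch", "# Node ID", "# Parent") and everything after them is
# commit message. A standalone parser for just that header block, run on
# hypothetical sample data:

def parsehgheader(lines):
    meta = {'parents': []}
    for line in lines:
        if line.startswith('# User '):
            meta['user'] = line[7:].rstrip()
        elif line.startswith('# Date '):
            meta['date'] = line[7:].rstrip()
        elif line.startswith('# Branch '):
            meta['branch'] = line[9:].rstrip()
        elif line.startswith('# Node ID '):
            meta['node'] = line[10:].rstrip()
        elif line.startswith('# Parent '):
            meta['parents'].append(line[9:].strip())
        elif not line.startswith('# '):
            break                # end of the hg patch header
    return meta

if __name__ == '__main__':
    sample = ["# HG changeset patch\n",
              "# User Jane Doe <jane@example.com>\n",
              "# Date 1431561600 0\n",
              "# Node ID abcdef0123456789\n",
              "# Parent 0123456789abcdef\n",
              "fix the parser\n"]
    meta = parsehgheader(sample)
    assert meta['user'] == 'Jane Doe <jane@example.com>'
    assert meta['parents'] == ['0123456789abcdef']
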
277 class patchmeta(object):
277 class patchmeta(object):
278 """Patched file metadata
278 """Patched file metadata
279
279
280 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
280 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
281 or COPY. 'path' is the patched file path. 'oldpath' is set to the
281 or COPY. 'path' is the patched file path. 'oldpath' is set to the
282 origin file when 'op' is either COPY or RENAME, None otherwise. If
282 origin file when 'op' is either COPY or RENAME, None otherwise. If
283 file mode is changed, 'mode' is a tuple (islink, isexec) where
283 file mode is changed, 'mode' is a tuple (islink, isexec) where
284 'islink' is True if the file is a symlink and 'isexec' is True if
284 'islink' is True if the file is a symlink and 'isexec' is True if
285 the file is executable. Otherwise, 'mode' is None.
285 the file is executable. Otherwise, 'mode' is None.
286 """
286 """
287 def __init__(self, path):
287 def __init__(self, path):
288 self.path = path
288 self.path = path
289 self.oldpath = None
289 self.oldpath = None
290 self.mode = None
290 self.mode = None
291 self.op = 'MODIFY'
291 self.op = 'MODIFY'
292 self.binary = False
292 self.binary = False
293
293
294 def setmode(self, mode):
294 def setmode(self, mode):
295 islink = mode & 020000
295 islink = mode & 020000
296 isexec = mode & 0100
296 isexec = mode & 0100
297 self.mode = (islink, isexec)
297 self.mode = (islink, isexec)
298
298
299 def copy(self):
299 def copy(self):
300 other = patchmeta(self.path)
300 other = patchmeta(self.path)
301 other.oldpath = self.oldpath
301 other.oldpath = self.oldpath
302 other.mode = self.mode
302 other.mode = self.mode
303 other.op = self.op
303 other.op = self.op
304 other.binary = self.binary
304 other.binary = self.binary
305 return other
305 return other
306
306
307 def _ispatchinga(self, afile):
307 def _ispatchinga(self, afile):
308 if afile == '/dev/null':
308 if afile == '/dev/null':
309 return self.op == 'ADD'
309 return self.op == 'ADD'
310 return afile == 'a/' + (self.oldpath or self.path)
310 return afile == 'a/' + (self.oldpath or self.path)
311
311
312 def _ispatchingb(self, bfile):
312 def _ispatchingb(self, bfile):
313 if bfile == '/dev/null':
313 if bfile == '/dev/null':
314 return self.op == 'DELETE'
314 return self.op == 'DELETE'
315 return bfile == 'b/' + self.path
315 return bfile == 'b/' + self.path
316
316
317 def ispatching(self, afile, bfile):
317 def ispatching(self, afile, bfile):
318 return self._ispatchinga(afile) and self._ispatchingb(bfile)
318 return self._ispatchinga(afile) and self._ispatchingb(bfile)
319
319
320 def __repr__(self):
320 def __repr__(self):
321 return "<patchmeta %s %r>" % (self.op, self.path)
321 return "<patchmeta %s %r>" % (self.op, self.path)
322
322
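# Illustrative sketch, not from patch.py: patchmeta.setmode() above reduces a
# git file mode to an (islink, isexec) pair by masking the symlink bit
# (020000) and the owner-execute bit (0100). The same masking, written with
# the 0o-prefixed octal literals that work on both Python 2.6+ and Python 3:

def splitmode(mode):
    islink = bool(mode & 0o20000)
    isexec = bool(mode & 0o100)
    return islink, isexec

if __name__ == '__main__':
    assert splitmode(0o100755) == (False, True)    # regular executable file
    assert splitmode(0o100644) == (False, False)   # regular file, not executable
    assert splitmode(0o120000) == (True, False)    # symlink
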
323 def readgitpatch(lr):
323 def readgitpatch(lr):
324 """extract git-style metadata about patches from <patchname>"""
324 """extract git-style metadata about patches from <patchname>"""
325
325
326 # Filter patch for git information
326 # Filter patch for git information
327 gp = None
327 gp = None
328 gitpatches = []
328 gitpatches = []
329 for line in lr:
329 for line in lr:
330 line = line.rstrip(' \r\n')
330 line = line.rstrip(' \r\n')
331 if line.startswith('diff --git a/'):
331 if line.startswith('diff --git a/'):
332 m = gitre.match(line)
332 m = gitre.match(line)
333 if m:
333 if m:
334 if gp:
334 if gp:
335 gitpatches.append(gp)
335 gitpatches.append(gp)
336 dst = m.group(2)
336 dst = m.group(2)
337 gp = patchmeta(dst)
337 gp = patchmeta(dst)
338 elif gp:
338 elif gp:
339 if line.startswith('--- '):
339 if line.startswith('--- '):
340 gitpatches.append(gp)
340 gitpatches.append(gp)
341 gp = None
341 gp = None
342 continue
342 continue
343 if line.startswith('rename from '):
343 if line.startswith('rename from '):
344 gp.op = 'RENAME'
344 gp.op = 'RENAME'
345 gp.oldpath = line[12:]
345 gp.oldpath = line[12:]
346 elif line.startswith('rename to '):
346 elif line.startswith('rename to '):
347 gp.path = line[10:]
347 gp.path = line[10:]
348 elif line.startswith('copy from '):
348 elif line.startswith('copy from '):
349 gp.op = 'COPY'
349 gp.op = 'COPY'
350 gp.oldpath = line[10:]
350 gp.oldpath = line[10:]
351 elif line.startswith('copy to '):
351 elif line.startswith('copy to '):
352 gp.path = line[8:]
352 gp.path = line[8:]
353 elif line.startswith('deleted file'):
353 elif line.startswith('deleted file'):
354 gp.op = 'DELETE'
354 gp.op = 'DELETE'
355 elif line.startswith('new file mode '):
355 elif line.startswith('new file mode '):
356 gp.op = 'ADD'
356 gp.op = 'ADD'
357 gp.setmode(int(line[-6:], 8))
357 gp.setmode(int(line[-6:], 8))
358 elif line.startswith('new mode '):
358 elif line.startswith('new mode '):
359 gp.setmode(int(line[-6:], 8))
359 gp.setmode(int(line[-6:], 8))
360 elif line.startswith('GIT binary patch'):
360 elif line.startswith('GIT binary patch'):
361 gp.binary = True
361 gp.binary = True
362 if gp:
362 if gp:
363 gitpatches.append(gp)
363 gitpatches.append(gp)
364
364
365 return gitpatches
365 return gitpatches
366
366
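# [illustrative sketch, not part of this changeset] readgitpatch() only needs
# an iterable of lines, so a plain list is enough for a demo. The file names
# are hypothetical; the surrounding mercurial.patch module (which defines
# gitre and patchmeta) is assumed to be in scope.
demo = [
    'diff --git a/old.txt b/new.txt\n',
    'rename from old.txt\n',
    'rename to new.txt\n',
    'diff --git a/tool b/tool\n',
    'new mode 100755\n',
    '--- a/tool\n',
]
for gp in readgitpatch(demo):
    print gp.op, gp.oldpath, gp.path
# prints: RENAME old.txt new.txt
#         MODIFY None tool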
367 class linereader(object):
367 class linereader(object):
368 # simple class to allow pushing lines back into the input stream
368 # simple class to allow pushing lines back into the input stream
369 def __init__(self, fp):
369 def __init__(self, fp):
370 self.fp = fp
370 self.fp = fp
371 self.buf = []
371 self.buf = []
372
372
373 def push(self, line):
373 def push(self, line):
374 if line is not None:
374 if line is not None:
375 self.buf.append(line)
375 self.buf.append(line)
376
376
377 def readline(self):
377 def readline(self):
378 if self.buf:
378 if self.buf:
379 l = self.buf[0]
379 l = self.buf[0]
380 del self.buf[0]
380 del self.buf[0]
381 return l
381 return l
382 return self.fp.readline()
382 return self.fp.readline()
383
383
384 def __iter__(self):
384 def __iter__(self):
385 while True:
385 while True:
386 l = self.readline()
386 l = self.readline()
387 if not l:
387 if not l:
388 break
388 break
389 yield l
389 yield l
390
390
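# [illustrative sketch, not part of this changeset] push() lets a caller peek
# at a line and put it back; the hunk parsers further down rely on this.
import cStringIO
lr = linereader(cStringIO.StringIO('one\ntwo\n'))
first = lr.readline()          # 'one\n'
lr.push(first)                 # pushed lines come back before fp lines
assert lr.readline() == 'one\n'
assert lr.readline() == 'two\n'
assert lr.readline() == ''     # EOF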
391 class abstractbackend(object):
391 class abstractbackend(object):
392 def __init__(self, ui):
392 def __init__(self, ui):
393 self.ui = ui
393 self.ui = ui
394
394
395 def getfile(self, fname):
395 def getfile(self, fname):
396 """Return target file data and flags as a (data, (islink,
396 """Return target file data and flags as a (data, (islink,
397 isexec)) tuple. Data is None if file is missing/deleted.
397 isexec)) tuple. Data is None if file is missing/deleted.
398 """
398 """
399 raise NotImplementedError
399 raise NotImplementedError
400
400
401 def setfile(self, fname, data, mode, copysource):
401 def setfile(self, fname, data, mode, copysource):
402 """Write data to target file fname and set its mode. mode is a
402 """Write data to target file fname and set its mode. mode is a
403 (islink, isexec) tuple. If data is None, the file content should
403 (islink, isexec) tuple. If data is None, the file content should
404 be left unchanged. If the file is modified after being copied,
404 be left unchanged. If the file is modified after being copied,
405 copysource is set to the original file name.
405 copysource is set to the original file name.
406 """
406 """
407 raise NotImplementedError
407 raise NotImplementedError
408
408
409 def unlink(self, fname):
409 def unlink(self, fname):
410 """Unlink target file."""
410 """Unlink target file."""
411 raise NotImplementedError
411 raise NotImplementedError
412
412
413 def writerej(self, fname, failed, total, lines):
413 def writerej(self, fname, failed, total, lines):
414 """Write rejected lines for fname. failed is the number of hunks
414 """Write rejected lines for fname. failed is the number of hunks
415 which failed to apply and total the total number of hunks for this
415 which failed to apply and total the total number of hunks for this
416 file.
416 file.
417 """
417 """
418 pass
418 pass
419
419
420 def exists(self, fname):
420 def exists(self, fname):
421 raise NotImplementedError
421 raise NotImplementedError
422
422
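# [illustrative sketch, not part of this changeset] The minimal surface a
# backend subclass provides; this in-memory variant is hypothetical and only
# meant to show the contract documented above.
class memorybackend(abstractbackend):
    def __init__(self, ui):
        super(memorybackend, self).__init__(ui)
        self._files = {}
    def getfile(self, fname):
        # returns (data, (islink, isexec)), or (None, None) when missing
        return self._files.get(fname, (None, None))
    def setfile(self, fname, data, mode, copysource):
        # a real backend would keep the old data when data is None
        self._files[fname] = (data, mode)
    def unlink(self, fname):
        self._files.pop(fname, None)
    def exists(self, fname):
        return fname in self._files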
423 class fsbackend(abstractbackend):
423 class fsbackend(abstractbackend):
424 def __init__(self, ui, basedir):
424 def __init__(self, ui, basedir):
425 super(fsbackend, self).__init__(ui)
425 super(fsbackend, self).__init__(ui)
426 self.opener = scmutil.opener(basedir)
426 self.opener = scmutil.opener(basedir)
427
427
428 def _join(self, f):
428 def _join(self, f):
429 return os.path.join(self.opener.base, f)
429 return os.path.join(self.opener.base, f)
430
430
431 def getfile(self, fname):
431 def getfile(self, fname):
432 if self.opener.islink(fname):
432 if self.opener.islink(fname):
433 return (self.opener.readlink(fname), (True, False))
433 return (self.opener.readlink(fname), (True, False))
434
434
435 isexec = False
435 isexec = False
436 try:
436 try:
437 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
437 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
438 except OSError, e:
438 except OSError, e:
439 if e.errno != errno.ENOENT:
439 if e.errno != errno.ENOENT:
440 raise
440 raise
441 try:
441 try:
442 return (self.opener.read(fname), (False, isexec))
442 return (self.opener.read(fname), (False, isexec))
443 except IOError, e:
443 except IOError, e:
444 if e.errno != errno.ENOENT:
444 if e.errno != errno.ENOENT:
445 raise
445 raise
446 return None, None
446 return None, None
447
447
448 def setfile(self, fname, data, mode, copysource):
448 def setfile(self, fname, data, mode, copysource):
449 islink, isexec = mode
449 islink, isexec = mode
450 if data is None:
450 if data is None:
451 self.opener.setflags(fname, islink, isexec)
451 self.opener.setflags(fname, islink, isexec)
452 return
452 return
453 if islink:
453 if islink:
454 self.opener.symlink(data, fname)
454 self.opener.symlink(data, fname)
455 else:
455 else:
456 self.opener.write(fname, data)
456 self.opener.write(fname, data)
457 if isexec:
457 if isexec:
458 self.opener.setflags(fname, False, True)
458 self.opener.setflags(fname, False, True)
459
459
460 def unlink(self, fname):
460 def unlink(self, fname):
461 self.opener.unlinkpath(fname, ignoremissing=True)
461 self.opener.unlinkpath(fname, ignoremissing=True)
462
462
463 def writerej(self, fname, failed, total, lines):
463 def writerej(self, fname, failed, total, lines):
464 fname = fname + ".rej"
464 fname = fname + ".rej"
465 self.ui.warn(
465 self.ui.warn(
466 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
466 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
467 (failed, total, fname))
467 (failed, total, fname))
468 fp = self.opener(fname, 'w')
468 fp = self.opener(fname, 'w')
469 fp.writelines(lines)
469 fp.writelines(lines)
470 fp.close()
470 fp.close()
471
471
472 def exists(self, fname):
472 def exists(self, fname):
473 return self.opener.lexists(fname)
473 return self.opener.lexists(fname)
474
474
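# [illustrative sketch, not part of this changeset] A round trip through
# fsbackend; 'someui' stands for any ui object and '/tmp/scratch' for a
# writable scratch directory (both hypothetical).
backend = fsbackend(someui, '/tmp/scratch')
backend.setfile('hello.txt', 'hi\n', (False, False), None)
data, (islink, isexec) = backend.getfile('hello.txt')
assert data == 'hi\n' and not islink and not isexec
backend.unlink('hello.txt')
assert not backend.exists('hello.txt')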
475 class workingbackend(fsbackend):
475 class workingbackend(fsbackend):
476 def __init__(self, ui, repo, similarity):
476 def __init__(self, ui, repo, similarity):
477 super(workingbackend, self).__init__(ui, repo.root)
477 super(workingbackend, self).__init__(ui, repo.root)
478 self.repo = repo
478 self.repo = repo
479 self.similarity = similarity
479 self.similarity = similarity
480 self.removed = set()
480 self.removed = set()
481 self.changed = set()
481 self.changed = set()
482 self.copied = []
482 self.copied = []
483
483
484 def _checkknown(self, fname):
484 def _checkknown(self, fname):
485 if self.repo.dirstate[fname] == '?' and self.exists(fname):
485 if self.repo.dirstate[fname] == '?' and self.exists(fname):
486 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
486 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
487
487
488 def setfile(self, fname, data, mode, copysource):
488 def setfile(self, fname, data, mode, copysource):
489 self._checkknown(fname)
489 self._checkknown(fname)
490 super(workingbackend, self).setfile(fname, data, mode, copysource)
490 super(workingbackend, self).setfile(fname, data, mode, copysource)
491 if copysource is not None:
491 if copysource is not None:
492 self.copied.append((copysource, fname))
492 self.copied.append((copysource, fname))
493 self.changed.add(fname)
493 self.changed.add(fname)
494
494
495 def unlink(self, fname):
495 def unlink(self, fname):
496 self._checkknown(fname)
496 self._checkknown(fname)
497 super(workingbackend, self).unlink(fname)
497 super(workingbackend, self).unlink(fname)
498 self.removed.add(fname)
498 self.removed.add(fname)
499 self.changed.add(fname)
499 self.changed.add(fname)
500
500
501 def close(self):
501 def close(self):
502 wctx = self.repo[None]
502 wctx = self.repo[None]
503 changed = set(self.changed)
503 changed = set(self.changed)
504 for src, dst in self.copied:
504 for src, dst in self.copied:
505 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
505 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
506 if self.removed:
506 if self.removed:
507 wctx.forget(sorted(self.removed))
507 wctx.forget(sorted(self.removed))
508 for f in self.removed:
508 for f in self.removed:
509 if f not in self.repo.dirstate:
509 if f not in self.repo.dirstate:
510 # File was deleted and no longer belongs to the
510 # File was deleted and no longer belongs to the
511 # dirstate; it was probably marked added then
511 # dirstate; it was probably marked added then
512 # deleted, and should not be considered by
512 # deleted, and should not be considered by
513 # marktouched().
513 # marktouched().
514 changed.discard(f)
514 changed.discard(f)
515 if changed:
515 if changed:
516 scmutil.marktouched(self.repo, changed, self.similarity)
516 scmutil.marktouched(self.repo, changed, self.similarity)
517 return sorted(self.changed)
517 return sorted(self.changed)
518
518
519 class filestore(object):
519 class filestore(object):
520 def __init__(self, maxsize=None):
520 def __init__(self, maxsize=None):
521 self.opener = None
521 self.opener = None
522 self.files = {}
522 self.files = {}
523 self.created = 0
523 self.created = 0
524 self.maxsize = maxsize
524 self.maxsize = maxsize
525 if self.maxsize is None:
525 if self.maxsize is None:
526 self.maxsize = 4*(2**20)
526 self.maxsize = 4*(2**20)
527 self.size = 0
527 self.size = 0
528 self.data = {}
528 self.data = {}
529
529
530 def setfile(self, fname, data, mode, copied=None):
530 def setfile(self, fname, data, mode, copied=None):
531 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
531 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
532 self.data[fname] = (data, mode, copied)
532 self.data[fname] = (data, mode, copied)
533 self.size += len(data)
533 self.size += len(data)
534 else:
534 else:
535 if self.opener is None:
535 if self.opener is None:
536 root = tempfile.mkdtemp(prefix='hg-patch-')
536 root = tempfile.mkdtemp(prefix='hg-patch-')
537 self.opener = scmutil.opener(root)
537 self.opener = scmutil.opener(root)
538 # Avoid filename issues with these simple names
538 # Avoid filename issues with these simple names
539 fn = str(self.created)
539 fn = str(self.created)
540 self.opener.write(fn, data)
540 self.opener.write(fn, data)
541 self.created += 1
541 self.created += 1
542 self.files[fname] = (fn, mode, copied)
542 self.files[fname] = (fn, mode, copied)
543
543
544 def getfile(self, fname):
544 def getfile(self, fname):
545 if fname in self.data:
545 if fname in self.data:
546 return self.data[fname]
546 return self.data[fname]
547 if not self.opener or fname not in self.files:
547 if not self.opener or fname not in self.files:
548 return None, None, None
548 return None, None, None
549 fn, mode, copied = self.files[fname]
549 fn, mode, copied = self.files[fname]
550 return self.opener.read(fn), mode, copied
550 return self.opener.read(fn), mode, copied
551
551
552 def close(self):
552 def close(self):
553 if self.opener:
553 if self.opener:
554 shutil.rmtree(self.opener.base)
554 shutil.rmtree(self.opener.base)
555
555
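# [illustrative sketch, not part of this changeset] filestore keeps small file
# contents in memory and spills anything that would exceed maxsize into a
# temporary directory; the tiny maxsize below is hypothetical, chosen to
# force a spill.
store = filestore(maxsize=10)
store.setfile('small', 'tiny', (False, False))     # kept in self.data
store.setfile('big', 'x' * 64, (False, False))     # written under a tmp dir
assert store.getfile('small') == ('tiny', (False, False), None)
data, mode, copied = store.getfile('big')
assert data == 'x' * 64 and copied is None
store.close()                                      # removes the tmp dir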
556 class repobackend(abstractbackend):
556 class repobackend(abstractbackend):
557 def __init__(self, ui, repo, ctx, store):
557 def __init__(self, ui, repo, ctx, store):
558 super(repobackend, self).__init__(ui)
558 super(repobackend, self).__init__(ui)
559 self.repo = repo
559 self.repo = repo
560 self.ctx = ctx
560 self.ctx = ctx
561 self.store = store
561 self.store = store
562 self.changed = set()
562 self.changed = set()
563 self.removed = set()
563 self.removed = set()
564 self.copied = {}
564 self.copied = {}
565
565
566 def _checkknown(self, fname):
566 def _checkknown(self, fname):
567 if fname not in self.ctx:
567 if fname not in self.ctx:
568 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
568 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
569
569
570 def getfile(self, fname):
570 def getfile(self, fname):
571 try:
571 try:
572 fctx = self.ctx[fname]
572 fctx = self.ctx[fname]
573 except error.LookupError:
573 except error.LookupError:
574 return None, None
574 return None, None
575 flags = fctx.flags()
575 flags = fctx.flags()
576 return fctx.data(), ('l' in flags, 'x' in flags)
576 return fctx.data(), ('l' in flags, 'x' in flags)
577
577
578 def setfile(self, fname, data, mode, copysource):
578 def setfile(self, fname, data, mode, copysource):
579 if copysource:
579 if copysource:
580 self._checkknown(copysource)
580 self._checkknown(copysource)
581 if data is None:
581 if data is None:
582 data = self.ctx[fname].data()
582 data = self.ctx[fname].data()
583 self.store.setfile(fname, data, mode, copysource)
583 self.store.setfile(fname, data, mode, copysource)
584 self.changed.add(fname)
584 self.changed.add(fname)
585 if copysource:
585 if copysource:
586 self.copied[fname] = copysource
586 self.copied[fname] = copysource
587
587
588 def unlink(self, fname):
588 def unlink(self, fname):
589 self._checkknown(fname)
589 self._checkknown(fname)
590 self.removed.add(fname)
590 self.removed.add(fname)
591
591
592 def exists(self, fname):
592 def exists(self, fname):
593 return fname in self.ctx
593 return fname in self.ctx
594
594
595 def close(self):
595 def close(self):
596 return self.changed | self.removed
596 return self.changed | self.removed
597
597
598 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
598 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
599 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
599 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
600 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
600 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
601 eolmodes = ['strict', 'crlf', 'lf', 'auto']
601 eolmodes = ['strict', 'crlf', 'lf', 'auto']
602
602
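# [illustrative sketch, not part of this changeset] What the hunk-header
# regexes above capture; the sample headers are hypothetical.
m = unidesc.match('@@ -12,5 +14,6 @@')
assert m.groups() == ('12', '5', '14', '6')
m = unidesc.match('@@ -3 +4 @@')          # an omitted length means length 1
assert m.groups() == ('3', None, '4', None)
m = contextdesc.match('*** 7,9 ****')
assert m.groups() == ('7', '9')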
603 class patchfile(object):
603 class patchfile(object):
604 def __init__(self, ui, gp, backend, store, eolmode='strict'):
604 def __init__(self, ui, gp, backend, store, eolmode='strict'):
605 self.fname = gp.path
605 self.fname = gp.path
606 self.eolmode = eolmode
606 self.eolmode = eolmode
607 self.eol = None
607 self.eol = None
608 self.backend = backend
608 self.backend = backend
609 self.ui = ui
609 self.ui = ui
610 self.lines = []
610 self.lines = []
611 self.exists = False
611 self.exists = False
612 self.missing = True
612 self.missing = True
613 self.mode = gp.mode
613 self.mode = gp.mode
614 self.copysource = gp.oldpath
614 self.copysource = gp.oldpath
615 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
615 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
616 self.remove = gp.op == 'DELETE'
616 self.remove = gp.op == 'DELETE'
617 if self.copysource is None:
617 if self.copysource is None:
618 data, mode = backend.getfile(self.fname)
618 data, mode = backend.getfile(self.fname)
619 else:
619 else:
620 data, mode = store.getfile(self.copysource)[:2]
620 data, mode = store.getfile(self.copysource)[:2]
621 if data is not None:
621 if data is not None:
622 self.exists = self.copysource is None or backend.exists(self.fname)
622 self.exists = self.copysource is None or backend.exists(self.fname)
623 self.missing = False
623 self.missing = False
624 if data:
624 if data:
625 self.lines = mdiff.splitnewlines(data)
625 self.lines = mdiff.splitnewlines(data)
626 if self.mode is None:
626 if self.mode is None:
627 self.mode = mode
627 self.mode = mode
628 if self.lines:
628 if self.lines:
629 # Normalize line endings
629 # Normalize line endings
630 if self.lines[0].endswith('\r\n'):
630 if self.lines[0].endswith('\r\n'):
631 self.eol = '\r\n'
631 self.eol = '\r\n'
632 elif self.lines[0].endswith('\n'):
632 elif self.lines[0].endswith('\n'):
633 self.eol = '\n'
633 self.eol = '\n'
634 if eolmode != 'strict':
634 if eolmode != 'strict':
635 nlines = []
635 nlines = []
636 for l in self.lines:
636 for l in self.lines:
637 if l.endswith('\r\n'):
637 if l.endswith('\r\n'):
638 l = l[:-2] + '\n'
638 l = l[:-2] + '\n'
639 nlines.append(l)
639 nlines.append(l)
640 self.lines = nlines
640 self.lines = nlines
641 else:
641 else:
642 if self.create:
642 if self.create:
643 self.missing = False
643 self.missing = False
644 if self.mode is None:
644 if self.mode is None:
645 self.mode = (False, False)
645 self.mode = (False, False)
646 if self.missing:
646 if self.missing:
647 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
647 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
648
648
649 self.hash = {}
649 self.hash = {}
650 self.dirty = 0
650 self.dirty = 0
651 self.offset = 0
651 self.offset = 0
652 self.skew = 0
652 self.skew = 0
653 self.rej = []
653 self.rej = []
654 self.fileprinted = False
654 self.fileprinted = False
655 self.printfile(False)
655 self.printfile(False)
656 self.hunks = 0
656 self.hunks = 0
657
657
658 def writelines(self, fname, lines, mode):
658 def writelines(self, fname, lines, mode):
659 if self.eolmode == 'auto':
659 if self.eolmode == 'auto':
660 eol = self.eol
660 eol = self.eol
661 elif self.eolmode == 'crlf':
661 elif self.eolmode == 'crlf':
662 eol = '\r\n'
662 eol = '\r\n'
663 else:
663 else:
664 eol = '\n'
664 eol = '\n'
665
665
666 if self.eolmode != 'strict' and eol and eol != '\n':
666 if self.eolmode != 'strict' and eol and eol != '\n':
667 rawlines = []
667 rawlines = []
668 for l in lines:
668 for l in lines:
669 if l and l[-1] == '\n':
669 if l and l[-1] == '\n':
670 l = l[:-1] + eol
670 l = l[:-1] + eol
671 rawlines.append(l)
671 rawlines.append(l)
672 lines = rawlines
672 lines = rawlines
673
673
674 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
674 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
675
675
676 def printfile(self, warn):
676 def printfile(self, warn):
677 if self.fileprinted:
677 if self.fileprinted:
678 return
678 return
679 if warn or self.ui.verbose:
679 if warn or self.ui.verbose:
680 self.fileprinted = True
680 self.fileprinted = True
681 s = _("patching file %s\n") % self.fname
681 s = _("patching file %s\n") % self.fname
682 if warn:
682 if warn:
683 self.ui.warn(s)
683 self.ui.warn(s)
684 else:
684 else:
685 self.ui.note(s)
685 self.ui.note(s)
686
686
687
687
688 def findlines(self, l, linenum):
688 def findlines(self, l, linenum):
689 # looks through the hash and finds candidate lines. The
689 # looks through the hash and finds candidate lines. The
690 # result is a list of line numbers sorted based on distance
690 # result is a list of line numbers sorted based on distance
691 # from linenum
691 # from linenum
692
692
693 cand = self.hash.get(l, [])
693 cand = self.hash.get(l, [])
694 if len(cand) > 1:
694 if len(cand) > 1:
695 # resort our list of potentials forward then back.
695 # resort our list of potentials forward then back.
696 cand.sort(key=lambda x: abs(x - linenum))
696 cand.sort(key=lambda x: abs(x - linenum))
697 return cand
697 return cand
698
698
699 def write_rej(self):
699 def write_rej(self):
700 # our rejects are a little different from patch(1). This always
700 # our rejects are a little different from patch(1). This always
701 # creates rejects in the same form as the original patch. A file
701 # creates rejects in the same form as the original patch. A file
702 # header is inserted so that you can run the reject through patch again
702 # header is inserted so that you can run the reject through patch again
703 # without having to type the filename.
703 # without having to type the filename.
704 if not self.rej:
704 if not self.rej:
705 return
705 return
706 base = os.path.basename(self.fname)
706 base = os.path.basename(self.fname)
707 lines = ["--- %s\n+++ %s\n" % (base, base)]
707 lines = ["--- %s\n+++ %s\n" % (base, base)]
708 for x in self.rej:
708 for x in self.rej:
709 for l in x.hunk:
709 for l in x.hunk:
710 lines.append(l)
710 lines.append(l)
711 if l[-1] != '\n':
711 if l[-1] != '\n':
712 lines.append("\n\ No newline at end of file\n")
712 lines.append("\n\ No newline at end of file\n")
713 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
713 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
714
714
715 def apply(self, h):
715 def apply(self, h):
716 if not h.complete():
716 if not h.complete():
717 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
717 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
718 (h.number, h.desc, len(h.a), h.lena, len(h.b),
718 (h.number, h.desc, len(h.a), h.lena, len(h.b),
719 h.lenb))
719 h.lenb))
720
720
721 self.hunks += 1
721 self.hunks += 1
722
722
723 if self.missing:
723 if self.missing:
724 self.rej.append(h)
724 self.rej.append(h)
725 return -1
725 return -1
726
726
727 if self.exists and self.create:
727 if self.exists and self.create:
728 if self.copysource:
728 if self.copysource:
729 self.ui.warn(_("cannot create %s: destination already "
729 self.ui.warn(_("cannot create %s: destination already "
730 "exists\n") % self.fname)
730 "exists\n") % self.fname)
731 else:
731 else:
732 self.ui.warn(_("file %s already exists\n") % self.fname)
732 self.ui.warn(_("file %s already exists\n") % self.fname)
733 self.rej.append(h)
733 self.rej.append(h)
734 return -1
734 return -1
735
735
736 if isinstance(h, binhunk):
736 if isinstance(h, binhunk):
737 if self.remove:
737 if self.remove:
738 self.backend.unlink(self.fname)
738 self.backend.unlink(self.fname)
739 else:
739 else:
740 l = h.new(self.lines)
740 l = h.new(self.lines)
741 self.lines[:] = l
741 self.lines[:] = l
742 self.offset += len(l)
742 self.offset += len(l)
743 self.dirty = True
743 self.dirty = True
744 return 0
744 return 0
745
745
746 horig = h
746 horig = h
747 if (self.eolmode in ('crlf', 'lf')
747 if (self.eolmode in ('crlf', 'lf')
748 or self.eolmode == 'auto' and self.eol):
748 or self.eolmode == 'auto' and self.eol):
749 # If new eols are going to be normalized, then normalize
749 # If new eols are going to be normalized, then normalize
750 # hunk data before patching. Otherwise, preserve input
750 # hunk data before patching. Otherwise, preserve input
751 # line-endings.
751 # line-endings.
752 h = h.getnormalized()
752 h = h.getnormalized()
753
753
754 # fast case first, no offsets, no fuzz
754 # fast case first, no offsets, no fuzz
755 old, oldstart, new, newstart = h.fuzzit(0, False)
755 old, oldstart, new, newstart = h.fuzzit(0, False)
756 oldstart += self.offset
756 oldstart += self.offset
757 orig_start = oldstart
757 orig_start = oldstart
758 # if there's skew we want to emit the "(offset %d lines)" even
758 # if there's skew we want to emit the "(offset %d lines)" even
759 # when the hunk cleanly applies at start + skew, so skip the
759 # when the hunk cleanly applies at start + skew, so skip the
760 # fast case code
760 # fast case code
761 if (self.skew == 0 and
761 if (self.skew == 0 and
762 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
762 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
763 if self.remove:
763 if self.remove:
764 self.backend.unlink(self.fname)
764 self.backend.unlink(self.fname)
765 else:
765 else:
766 self.lines[oldstart:oldstart + len(old)] = new
766 self.lines[oldstart:oldstart + len(old)] = new
767 self.offset += len(new) - len(old)
767 self.offset += len(new) - len(old)
768 self.dirty = True
768 self.dirty = True
769 return 0
769 return 0
770
770
771 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
771 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
772 self.hash = {}
772 self.hash = {}
773 for x, s in enumerate(self.lines):
773 for x, s in enumerate(self.lines):
774 self.hash.setdefault(s, []).append(x)
774 self.hash.setdefault(s, []).append(x)
775
775
776 for fuzzlen in xrange(3):
776 for fuzzlen in xrange(3):
777 for toponly in [True, False]:
777 for toponly in [True, False]:
778 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
778 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
779 oldstart = oldstart + self.offset + self.skew
779 oldstart = oldstart + self.offset + self.skew
780 oldstart = min(oldstart, len(self.lines))
780 oldstart = min(oldstart, len(self.lines))
781 if old:
781 if old:
782 cand = self.findlines(old[0][1:], oldstart)
782 cand = self.findlines(old[0][1:], oldstart)
783 else:
783 else:
784 # Only adding lines with no or fuzzed context, so just
784 # Only adding lines with no or fuzzed context, so just
785 # take the skew into account
785 # take the skew into account
786 cand = [oldstart]
786 cand = [oldstart]
787
787
788 for l in cand:
788 for l in cand:
789 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
789 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
790 self.lines[l : l + len(old)] = new
790 self.lines[l : l + len(old)] = new
791 self.offset += len(new) - len(old)
791 self.offset += len(new) - len(old)
792 self.skew = l - orig_start
792 self.skew = l - orig_start
793 self.dirty = True
793 self.dirty = True
794 offset = l - orig_start - fuzzlen
794 offset = l - orig_start - fuzzlen
795 if fuzzlen:
795 if fuzzlen:
796 msg = _("Hunk #%d succeeded at %d "
796 msg = _("Hunk #%d succeeded at %d "
797 "with fuzz %d "
797 "with fuzz %d "
798 "(offset %d lines).\n")
798 "(offset %d lines).\n")
799 self.printfile(True)
799 self.printfile(True)
800 self.ui.warn(msg %
800 self.ui.warn(msg %
801 (h.number, l + 1, fuzzlen, offset))
801 (h.number, l + 1, fuzzlen, offset))
802 else:
802 else:
803 msg = _("Hunk #%d succeeded at %d "
803 msg = _("Hunk #%d succeeded at %d "
804 "(offset %d lines).\n")
804 "(offset %d lines).\n")
805 self.ui.note(msg % (h.number, l + 1, offset))
805 self.ui.note(msg % (h.number, l + 1, offset))
806 return fuzzlen
806 return fuzzlen
807 self.printfile(True)
807 self.printfile(True)
808 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
808 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
809 self.rej.append(horig)
809 self.rej.append(horig)
810 return -1
810 return -1
811
811
812 def close(self):
812 def close(self):
813 if self.dirty:
813 if self.dirty:
814 self.writelines(self.fname, self.lines, self.mode)
814 self.writelines(self.fname, self.lines, self.mode)
815 self.write_rej()
815 self.write_rej()
816 return len(self.rej)
816 return len(self.rej)
817
817
818 class header(object):
818 class header(object):
819 """patch header
819 """patch header
820 """
820 """
821 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
821 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
822 diff_re = re.compile('diff -r .* (.*)$')
822 diff_re = re.compile('diff -r .* (.*)$')
823 allhunks_re = re.compile('(?:index|deleted file) ')
823 allhunks_re = re.compile('(?:index|deleted file) ')
824 pretty_re = re.compile('(?:new file|deleted file) ')
824 pretty_re = re.compile('(?:new file|deleted file) ')
825 special_re = re.compile('(?:index|deleted|copy|rename) ')
825 special_re = re.compile('(?:index|deleted|copy|rename) ')
826 newfile_re = re.compile('(?:new file)')
826 newfile_re = re.compile('(?:new file)')
827
827
828 def __init__(self, header):
828 def __init__(self, header):
829 self.header = header
829 self.header = header
830 self.hunks = []
830 self.hunks = []
831
831
832 def binary(self):
832 def binary(self):
833 return any(h.startswith('index ') for h in self.header)
833 return any(h.startswith('index ') for h in self.header)
834
834
835 def pretty(self, fp):
835 def pretty(self, fp):
836 for h in self.header:
836 for h in self.header:
837 if h.startswith('index '):
837 if h.startswith('index '):
838 fp.write(_('this modifies a binary file (all or nothing)\n'))
838 fp.write(_('this modifies a binary file (all or nothing)\n'))
839 break
839 break
840 if self.pretty_re.match(h):
840 if self.pretty_re.match(h):
841 fp.write(h)
841 fp.write(h)
842 if self.binary():
842 if self.binary():
843 fp.write(_('this is a binary file\n'))
843 fp.write(_('this is a binary file\n'))
844 break
844 break
845 if h.startswith('---'):
845 if h.startswith('---'):
846 fp.write(_('%d hunks, %d lines changed\n') %
846 fp.write(_('%d hunks, %d lines changed\n') %
847 (len(self.hunks),
847 (len(self.hunks),
848 sum([max(h.added, h.removed) for h in self.hunks])))
848 sum([max(h.added, h.removed) for h in self.hunks])))
849 break
849 break
850 fp.write(h)
850 fp.write(h)
851
851
852 def write(self, fp):
852 def write(self, fp):
853 fp.write(''.join(self.header))
853 fp.write(''.join(self.header))
854
854
855 def allhunks(self):
855 def allhunks(self):
856 return any(self.allhunks_re.match(h) for h in self.header)
856 return any(self.allhunks_re.match(h) for h in self.header)
857
857
858 def files(self):
858 def files(self):
859 match = self.diffgit_re.match(self.header[0])
859 match = self.diffgit_re.match(self.header[0])
860 if match:
860 if match:
861 fromfile, tofile = match.groups()
861 fromfile, tofile = match.groups()
862 if fromfile == tofile:
862 if fromfile == tofile:
863 return [fromfile]
863 return [fromfile]
864 return [fromfile, tofile]
864 return [fromfile, tofile]
865 else:
865 else:
866 return self.diff_re.match(self.header[0]).groups()
866 return self.diff_re.match(self.header[0]).groups()
867
867
868 def filename(self):
868 def filename(self):
869 return self.files()[-1]
869 return self.files()[-1]
870
870
871 def __repr__(self):
871 def __repr__(self):
872 return '<header %s>' % (' '.join(map(repr, self.files())))
872 return '<header %s>' % (' '.join(map(repr, self.files())))
873
873
874 def isnewfile(self):
874 def isnewfile(self):
875 return any(self.newfile_re.match(h) for h in self.header)
875 return any(self.newfile_re.match(h) for h in self.header)
876
876
877 def special(self):
877 def special(self):
878 # Special files are shown only at the header level and not at the hunk
878 # Special files are shown only at the header level and not at the hunk
879 # level for example a file that has been deleted is a special file.
879 # level for example a file that has been deleted is a special file.
880 # The user cannot change the content of the operation, in the case of
880 # The user cannot change the content of the operation, in the case of
881 # the deleted file he has to take the deletion or not take it, he
881 # the deleted file he has to take the deletion or not take it, he
882 # cannot take some of it.
882 # cannot take some of it.
883 # Newly added files are special if they are empty, they are not special
883 # Newly added files are special if they are empty, they are not special
884 # if they have some content as we want to be able to change it
884 # if they have some content as we want to be able to change it
885 nocontent = len(self.header) == 2
885 nocontent = len(self.header) == 2
886 emptynewfile = self.isnewfile() and nocontent
886 emptynewfile = self.isnewfile() and nocontent
887 return emptynewfile or \
887 return emptynewfile or \
888 any(self.special_re.match(h) for h in self.header)
888 any(self.special_re.match(h) for h in self.header)
889
889
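# [illustrative sketch, not part of this changeset] Header parsing for a
# git-style rename; the paths are hypothetical.
h = header(['diff --git a/liba/util.py b/libb/util.py\n',
            'rename from liba/util.py\n',
            'rename to libb/util.py\n'])
assert h.files() == ['liba/util.py', 'libb/util.py']
assert h.filename() == 'libb/util.py'
assert h.special() and not h.isnewfile() and not h.binary()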
890 class recordhunk(object):
890 class recordhunk(object):
891 """patch hunk
891 """patch hunk
892
892
893 XXX shouldn't we merge this with the other hunk class?
893 XXX shouldn't we merge this with the other hunk class?
894 """
894 """
895 maxcontext = 3
895 maxcontext = 3
896
896
897 def __init__(self, header, fromline, toline, proc, before, hunk, after):
897 def __init__(self, header, fromline, toline, proc, before, hunk, after):
898 def trimcontext(number, lines):
898 def trimcontext(number, lines):
899 delta = len(lines) - self.maxcontext
899 delta = len(lines) - self.maxcontext
900 if False and delta > 0:
900 if False and delta > 0:
901 return number + delta, lines[:self.maxcontext]
901 return number + delta, lines[:self.maxcontext]
902 return number, lines
902 return number, lines
903
903
904 self.header = header
904 self.header = header
905 self.fromline, self.before = trimcontext(fromline, before)
905 self.fromline, self.before = trimcontext(fromline, before)
906 self.toline, self.after = trimcontext(toline, after)
906 self.toline, self.after = trimcontext(toline, after)
907 self.proc = proc
907 self.proc = proc
908 self.hunk = hunk
908 self.hunk = hunk
909 self.added, self.removed = self.countchanges(self.hunk)
909 self.added, self.removed = self.countchanges(self.hunk)
910
910
911 def __eq__(self, v):
911 def __eq__(self, v):
912 if not isinstance(v, recordhunk):
912 if not isinstance(v, recordhunk):
913 return False
913 return False
914
914
915 return ((v.hunk == self.hunk) and
915 return ((v.hunk == self.hunk) and
916 (v.proc == self.proc) and
916 (v.proc == self.proc) and
917 (self.fromline == v.fromline) and
917 (self.fromline == v.fromline) and
918 (self.header.files() == v.header.files()))
918 (self.header.files() == v.header.files()))
919
919
920 def __hash__(self):
920 def __hash__(self):
921 return hash((tuple(self.hunk),
921 return hash((tuple(self.hunk),
922 tuple(self.header.files()),
922 tuple(self.header.files()),
923 self.fromline,
923 self.fromline,
924 self.proc))
924 self.proc))
925
925
926 def countchanges(self, hunk):
926 def countchanges(self, hunk):
927 """hunk -> (n+,n-)"""
927 """hunk -> (n+,n-)"""
928 add = len([h for h in hunk if h[0] == '+'])
928 add = len([h for h in hunk if h[0] == '+'])
929 rem = len([h for h in hunk if h[0] == '-'])
929 rem = len([h for h in hunk if h[0] == '-'])
930 return add, rem
930 return add, rem
931
931
932 def write(self, fp):
932 def write(self, fp):
933 delta = len(self.before) + len(self.after)
933 delta = len(self.before) + len(self.after)
934 if self.after and self.after[-1] == '\\ No newline at end of file\n':
934 if self.after and self.after[-1] == '\\ No newline at end of file\n':
935 delta -= 1
935 delta -= 1
936 fromlen = delta + self.removed
936 fromlen = delta + self.removed
937 tolen = delta + self.added
937 tolen = delta + self.added
938 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
938 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
939 (self.fromline, fromlen, self.toline, tolen,
939 (self.fromline, fromlen, self.toline, tolen,
940 self.proc and (' ' + self.proc)))
940 self.proc and (' ' + self.proc)))
941 fp.write(''.join(self.before + self.hunk + self.after))
941 fp.write(''.join(self.before + self.hunk + self.after))
942
942
943 pretty = write
943 pretty = write
944
944
945 def filename(self):
945 def filename(self):
946 return self.header.filename()
946 return self.header.filename()
947
947
948 def __repr__(self):
948 def __repr__(self):
949 return '<hunk %r@%d>' % (self.filename(), self.fromline)
949 return '<hunk %r@%d>' % (self.filename(), self.fromline)
950
950
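# [illustrative sketch, not part of this changeset] What recordhunk.write()
# emits for a small hunk; the file name and line numbers are hypothetical.
import cStringIO
hdr = header(['diff --git a/f.txt b/f.txt\n'])
rh = recordhunk(hdr, 10, 10, '',
                [' context before\n'],            # before
                ['-old\n', '+new\n', '+more\n'],  # hunk body: 1 removed, 2 added
                [' context after\n'])             # after
buf = cStringIO.StringIO()
rh.write(buf)
# delta = 2 context lines, so fromlen = 2 + 1 and tolen = 2 + 2
assert buf.getvalue().startswith('@@ -10,3 +10,4 @@\n')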
951 def filterpatch(ui, headers):
951 def filterpatch(ui, headers, operation=None):
952 """Interactively filter patch chunks into applied-only chunks"""
952 """Interactively filter patch chunks into applied-only chunks"""
953
953
954 def prompt(skipfile, skipall, query, chunk):
954 def prompt(skipfile, skipall, query, chunk):
955 """prompt query, and process base inputs
955 """prompt query, and process base inputs
956
956
957 - y/n for the rest of file
957 - y/n for the rest of file
958 - y/n for the rest
958 - y/n for the rest
959 - ? (help)
959 - ? (help)
960 - q (quit)
960 - q (quit)
961
961
962 Return True/False and possibly updated skipfile and skipall.
962 Return True/False and possibly updated skipfile and skipall.
963 """
963 """
964 newpatches = None
964 newpatches = None
965 if skipall is not None:
965 if skipall is not None:
966 return skipall, skipfile, skipall, newpatches
966 return skipall, skipfile, skipall, newpatches
967 if skipfile is not None:
967 if skipfile is not None:
968 return skipfile, skipfile, skipall, newpatches
968 return skipfile, skipfile, skipall, newpatches
969 while True:
969 while True:
970 resps = _('[Ynesfdaq?]'
970 resps = _('[Ynesfdaq?]'
971 '$$ &Yes, record this change'
971 '$$ &Yes, record this change'
972 '$$ &No, skip this change'
972 '$$ &No, skip this change'
973 '$$ &Edit this change manually'
973 '$$ &Edit this change manually'
974 '$$ &Skip remaining changes to this file'
974 '$$ &Skip remaining changes to this file'
975 '$$ Record remaining changes to this &file'
975 '$$ Record remaining changes to this &file'
976 '$$ &Done, skip remaining changes and files'
976 '$$ &Done, skip remaining changes and files'
977 '$$ Record &all changes to all remaining files'
977 '$$ Record &all changes to all remaining files'
978 '$$ &Quit, recording no changes'
978 '$$ &Quit, recording no changes'
979 '$$ &? (display help)')
979 '$$ &? (display help)')
980 r = ui.promptchoice("%s %s" % (query, resps))
980 r = ui.promptchoice("%s %s" % (query, resps))
981 ui.write("\n")
981 ui.write("\n")
982 if r == 8: # ?
982 if r == 8: # ?
983 for c, t in ui.extractchoices(resps)[1]:
983 for c, t in ui.extractchoices(resps)[1]:
984 ui.write('%s - %s\n' % (c, t.lower()))
984 ui.write('%s - %s\n' % (c, t.lower()))
985 continue
985 continue
986 elif r == 0: # yes
986 elif r == 0: # yes
987 ret = True
987 ret = True
988 elif r == 1: # no
988 elif r == 1: # no
989 ret = False
989 ret = False
990 elif r == 2: # Edit patch
990 elif r == 2: # Edit patch
991 if chunk is None:
991 if chunk is None:
992 ui.write(_('cannot edit patch for whole file'))
992 ui.write(_('cannot edit patch for whole file'))
993 ui.write("\n")
993 ui.write("\n")
994 continue
994 continue
995 if chunk.header.binary():
995 if chunk.header.binary():
996 ui.write(_('cannot edit patch for binary file'))
996 ui.write(_('cannot edit patch for binary file'))
997 ui.write("\n")
997 ui.write("\n")
998 continue
998 continue
999 # Patch comment based on the Git one (based on comment at end of
999 # Patch comment based on the Git one (based on comment at end of
1000 # http://mercurial.selenic.com/wiki/RecordExtension)
1000 # http://mercurial.selenic.com/wiki/RecordExtension)
1001 phelp = '---' + _("""
1001 phelp = '---' + _("""
1002 To remove '-' lines, make them ' ' lines (context).
1002 To remove '-' lines, make them ' ' lines (context).
1003 To remove '+' lines, delete them.
1003 To remove '+' lines, delete them.
1004 Lines starting with # will be removed from the patch.
1004 Lines starting with # will be removed from the patch.
1005
1005
1006 If the patch applies cleanly, the edited hunk will immediately be
1006 If the patch applies cleanly, the edited hunk will immediately be
1007 added to the record list. If it does not apply cleanly, a rejects
1007 added to the record list. If it does not apply cleanly, a rejects
1008 file will be generated: you can use that when you try again. If
1008 file will be generated: you can use that when you try again. If
1009 all lines of the hunk are removed, then the edit is aborted and
1009 all lines of the hunk are removed, then the edit is aborted and
1010 the hunk is left unchanged.
1010 the hunk is left unchanged.
1011 """)
1011 """)
1012 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1012 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1013 suffix=".diff", text=True)
1013 suffix=".diff", text=True)
1014 ncpatchfp = None
1014 ncpatchfp = None
1015 try:
1015 try:
1016 # Write the initial patch
1016 # Write the initial patch
1017 f = os.fdopen(patchfd, "w")
1017 f = os.fdopen(patchfd, "w")
1018 chunk.header.write(f)
1018 chunk.header.write(f)
1019 chunk.write(f)
1019 chunk.write(f)
1020 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1020 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1021 f.close()
1021 f.close()
1022 # Start the editor and wait for it to complete
1022 # Start the editor and wait for it to complete
1023 editor = ui.geteditor()
1023 editor = ui.geteditor()
1024 ui.system("%s \"%s\"" % (editor, patchfn),
1024 ui.system("%s \"%s\"" % (editor, patchfn),
1025 environ={'HGUSER': ui.username()},
1025 environ={'HGUSER': ui.username()},
1026 onerr=util.Abort, errprefix=_("edit failed"))
1026 onerr=util.Abort, errprefix=_("edit failed"))
1027 # Remove comment lines
1027 # Remove comment lines
1028 patchfp = open(patchfn)
1028 patchfp = open(patchfn)
1029 ncpatchfp = cStringIO.StringIO()
1029 ncpatchfp = cStringIO.StringIO()
1030 for line in patchfp:
1030 for line in patchfp:
1031 if not line.startswith('#'):
1031 if not line.startswith('#'):
1032 ncpatchfp.write(line)
1032 ncpatchfp.write(line)
1033 patchfp.close()
1033 patchfp.close()
1034 ncpatchfp.seek(0)
1034 ncpatchfp.seek(0)
1035 newpatches = parsepatch(ncpatchfp)
1035 newpatches = parsepatch(ncpatchfp)
1036 finally:
1036 finally:
1037 os.unlink(patchfn)
1037 os.unlink(patchfn)
1038 del ncpatchfp
1038 del ncpatchfp
1039 # Signal that the chunk shouldn't be applied as-is, but
1039 # Signal that the chunk shouldn't be applied as-is, but
1040 # provide the new patch to be used instead.
1040 # provide the new patch to be used instead.
1041 ret = False
1041 ret = False
1042 elif r == 3: # Skip
1042 elif r == 3: # Skip
1043 ret = skipfile = False
1043 ret = skipfile = False
1044 elif r == 4: # file (Record remaining)
1044 elif r == 4: # file (Record remaining)
1045 ret = skipfile = True
1045 ret = skipfile = True
1046 elif r == 5: # done, skip remaining
1046 elif r == 5: # done, skip remaining
1047 ret = skipall = False
1047 ret = skipall = False
1048 elif r == 6: # all
1048 elif r == 6: # all
1049 ret = skipall = True
1049 ret = skipall = True
1050 elif r == 7: # quit
1050 elif r == 7: # quit
1051 raise util.Abort(_('user quit'))
1051 raise util.Abort(_('user quit'))
1052 return ret, skipfile, skipall, newpatches
1052 return ret, skipfile, skipall, newpatches
1053
1053
1054 seen = set()
1054 seen = set()
1055 applied = {} # 'filename' -> [] of chunks
1055 applied = {} # 'filename' -> [] of chunks
1056 skipfile, skipall = None, None
1056 skipfile, skipall = None, None
1057 pos, total = 1, sum(len(h.hunks) for h in headers)
1057 pos, total = 1, sum(len(h.hunks) for h in headers)
1058 for h in headers:
1058 for h in headers:
1059 pos += len(h.hunks)
1059 pos += len(h.hunks)
1060 skipfile = None
1060 skipfile = None
1061 fixoffset = 0
1061 fixoffset = 0
1062 hdr = ''.join(h.header)
1062 hdr = ''.join(h.header)
1063 if hdr in seen:
1063 if hdr in seen:
1064 continue
1064 continue
1065 seen.add(hdr)
1065 seen.add(hdr)
1066 if skipall is None:
1066 if skipall is None:
1067 h.pretty(ui)
1067 h.pretty(ui)
1068 msg = (_('examine changes to %s?') %
1068 msg = (_('examine changes to %s?') %
1069 _(' and ').join("'%s'" % f for f in h.files()))
1069 _(' and ').join("'%s'" % f for f in h.files()))
1070 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1070 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1071 if not r:
1071 if not r:
1072 continue
1072 continue
1073 applied[h.filename()] = [h]
1073 applied[h.filename()] = [h]
1074 if h.allhunks():
1074 if h.allhunks():
1075 applied[h.filename()] += h.hunks
1075 applied[h.filename()] += h.hunks
1076 continue
1076 continue
1077 for i, chunk in enumerate(h.hunks):
1077 for i, chunk in enumerate(h.hunks):
1078 if skipfile is None and skipall is None:
1078 if skipfile is None and skipall is None:
1079 chunk.pretty(ui)
1079 chunk.pretty(ui)
1080 if total == 1:
1080 if total == 1:
1081 msg = _("record this change to '%s'?") % chunk.filename()
1081 msg = _("record this change to '%s'?") % chunk.filename()
1082 else:
1082 else:
1083 idx = pos - len(h.hunks) + i
1083 idx = pos - len(h.hunks) + i
1084 msg = _("record change %d/%d to '%s'?") % (idx, total,
1084 msg = _("record change %d/%d to '%s'?") % (idx, total,
1085 chunk.filename())
1085 chunk.filename())
1086 r, skipfile, skipall, newpatches = prompt(skipfile,
1086 r, skipfile, skipall, newpatches = prompt(skipfile,
1087 skipall, msg, chunk)
1087 skipall, msg, chunk)
1088 if r:
1088 if r:
1089 if fixoffset:
1089 if fixoffset:
1090 chunk = copy.copy(chunk)
1090 chunk = copy.copy(chunk)
1091 chunk.toline += fixoffset
1091 chunk.toline += fixoffset
1092 applied[chunk.filename()].append(chunk)
1092 applied[chunk.filename()].append(chunk)
1093 elif newpatches is not None:
1093 elif newpatches is not None:
1094 for newpatch in newpatches:
1094 for newpatch in newpatches:
1095 for newhunk in newpatch.hunks:
1095 for newhunk in newpatch.hunks:
1096 if fixoffset:
1096 if fixoffset:
1097 newhunk.toline += fixoffset
1097 newhunk.toline += fixoffset
1098 applied[newhunk.filename()].append(newhunk)
1098 applied[newhunk.filename()].append(newhunk)
1099 else:
1099 else:
1100 fixoffset += chunk.removed - chunk.added
1100 fixoffset += chunk.removed - chunk.added
1101 return sum([h for h in applied.itervalues()
1101 return sum([h for h in applied.itervalues()
1102 if h[0].special() or len(h) > 1], [])
1102 if h[0].special() or len(h) > 1], [])
1103 class hunk(object):
1103 class hunk(object):
1104 def __init__(self, desc, num, lr, context):
1104 def __init__(self, desc, num, lr, context):
1105 self.number = num
1105 self.number = num
1106 self.desc = desc
1106 self.desc = desc
1107 self.hunk = [desc]
1107 self.hunk = [desc]
1108 self.a = []
1108 self.a = []
1109 self.b = []
1109 self.b = []
1110 self.starta = self.lena = None
1110 self.starta = self.lena = None
1111 self.startb = self.lenb = None
1111 self.startb = self.lenb = None
1112 if lr is not None:
1112 if lr is not None:
1113 if context:
1113 if context:
1114 self.read_context_hunk(lr)
1114 self.read_context_hunk(lr)
1115 else:
1115 else:
1116 self.read_unified_hunk(lr)
1116 self.read_unified_hunk(lr)
1117
1117
1118 def getnormalized(self):
1118 def getnormalized(self):
1119 """Return a copy with line endings normalized to LF."""
1119 """Return a copy with line endings normalized to LF."""
1120
1120
1121 def normalize(lines):
1121 def normalize(lines):
1122 nlines = []
1122 nlines = []
1123 for line in lines:
1123 for line in lines:
1124 if line.endswith('\r\n'):
1124 if line.endswith('\r\n'):
1125 line = line[:-2] + '\n'
1125 line = line[:-2] + '\n'
1126 nlines.append(line)
1126 nlines.append(line)
1127 return nlines
1127 return nlines
1128
1128
1129 # Dummy object, it is rebuilt manually
1129 # Dummy object, it is rebuilt manually
1130 nh = hunk(self.desc, self.number, None, None)
1130 nh = hunk(self.desc, self.number, None, None)
1131 nh.number = self.number
1131 nh.number = self.number
1132 nh.desc = self.desc
1132 nh.desc = self.desc
1133 nh.hunk = self.hunk
1133 nh.hunk = self.hunk
1134 nh.a = normalize(self.a)
1134 nh.a = normalize(self.a)
1135 nh.b = normalize(self.b)
1135 nh.b = normalize(self.b)
1136 nh.starta = self.starta
1136 nh.starta = self.starta
1137 nh.startb = self.startb
1137 nh.startb = self.startb
1138 nh.lena = self.lena
1138 nh.lena = self.lena
1139 nh.lenb = self.lenb
1139 nh.lenb = self.lenb
1140 return nh
1140 return nh
1141
1141
1142 def read_unified_hunk(self, lr):
1142 def read_unified_hunk(self, lr):
1143 m = unidesc.match(self.desc)
1143 m = unidesc.match(self.desc)
1144 if not m:
1144 if not m:
1145 raise PatchError(_("bad hunk #%d") % self.number)
1145 raise PatchError(_("bad hunk #%d") % self.number)
1146 self.starta, self.lena, self.startb, self.lenb = m.groups()
1146 self.starta, self.lena, self.startb, self.lenb = m.groups()
1147 if self.lena is None:
1147 if self.lena is None:
1148 self.lena = 1
1148 self.lena = 1
1149 else:
1149 else:
1150 self.lena = int(self.lena)
1150 self.lena = int(self.lena)
1151 if self.lenb is None:
1151 if self.lenb is None:
1152 self.lenb = 1
1152 self.lenb = 1
1153 else:
1153 else:
1154 self.lenb = int(self.lenb)
1154 self.lenb = int(self.lenb)
1155 self.starta = int(self.starta)
1155 self.starta = int(self.starta)
1156 self.startb = int(self.startb)
1156 self.startb = int(self.startb)
1157 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1157 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1158 self.b)
1158 self.b)
1159 # if we hit eof before finishing out the hunk, the last line will
1159 # if we hit eof before finishing out the hunk, the last line will
1160 # be zero length. Let's try to fix it up.
1160 # be zero length. Let's try to fix it up.
1161 while len(self.hunk[-1]) == 0:
1161 while len(self.hunk[-1]) == 0:
1162 del self.hunk[-1]
1162 del self.hunk[-1]
1163 del self.a[-1]
1163 del self.a[-1]
1164 del self.b[-1]
1164 del self.b[-1]
1165 self.lena -= 1
1165 self.lena -= 1
1166 self.lenb -= 1
1166 self.lenb -= 1
1167 self._fixnewline(lr)
1167 self._fixnewline(lr)
1168
1168
1169 def read_context_hunk(self, lr):
1169 def read_context_hunk(self, lr):
1170 self.desc = lr.readline()
1170 self.desc = lr.readline()
1171 m = contextdesc.match(self.desc)
1171 m = contextdesc.match(self.desc)
1172 if not m:
1172 if not m:
1173 raise PatchError(_("bad hunk #%d") % self.number)
1173 raise PatchError(_("bad hunk #%d") % self.number)
1174 self.starta, aend = m.groups()
1174 self.starta, aend = m.groups()
1175 self.starta = int(self.starta)
1175 self.starta = int(self.starta)
1176 if aend is None:
1176 if aend is None:
1177 aend = self.starta
1177 aend = self.starta
1178 self.lena = int(aend) - self.starta
1178 self.lena = int(aend) - self.starta
1179 if self.starta:
1179 if self.starta:
1180 self.lena += 1
1180 self.lena += 1
1181 for x in xrange(self.lena):
1181 for x in xrange(self.lena):
1182 l = lr.readline()
1182 l = lr.readline()
1183 if l.startswith('---'):
1183 if l.startswith('---'):
1184 # lines addition, old block is empty
1184 # lines addition, old block is empty
1185 lr.push(l)
1185 lr.push(l)
1186 break
1186 break
1187 s = l[2:]
1187 s = l[2:]
1188 if l.startswith('- ') or l.startswith('! '):
1188 if l.startswith('- ') or l.startswith('! '):
1189 u = '-' + s
1189 u = '-' + s
1190 elif l.startswith(' '):
1190 elif l.startswith(' '):
1191 u = ' ' + s
1191 u = ' ' + s
1192 else:
1192 else:
1193 raise PatchError(_("bad hunk #%d old text line %d") %
1193 raise PatchError(_("bad hunk #%d old text line %d") %
1194 (self.number, x))
1194 (self.number, x))
1195 self.a.append(u)
1195 self.a.append(u)
1196 self.hunk.append(u)
1196 self.hunk.append(u)
1197
1197
1198 l = lr.readline()
1198 l = lr.readline()
1199 if l.startswith('\ '):
1199 if l.startswith('\ '):
1200 s = self.a[-1][:-1]
1200 s = self.a[-1][:-1]
1201 self.a[-1] = s
1201 self.a[-1] = s
1202 self.hunk[-1] = s
1202 self.hunk[-1] = s
1203 l = lr.readline()
1203 l = lr.readline()
1204 m = contextdesc.match(l)
1204 m = contextdesc.match(l)
1205 if not m:
1205 if not m:
1206 raise PatchError(_("bad hunk #%d") % self.number)
1206 raise PatchError(_("bad hunk #%d") % self.number)
1207 self.startb, bend = m.groups()
1207 self.startb, bend = m.groups()
1208 self.startb = int(self.startb)
1208 self.startb = int(self.startb)
1209 if bend is None:
1209 if bend is None:
1210 bend = self.startb
1210 bend = self.startb
1211 self.lenb = int(bend) - self.startb
1211 self.lenb = int(bend) - self.startb
1212 if self.startb:
1212 if self.startb:
1213 self.lenb += 1
1213 self.lenb += 1
1214 hunki = 1
1214 hunki = 1
1215 for x in xrange(self.lenb):
1215 for x in xrange(self.lenb):
1216 l = lr.readline()
1216 l = lr.readline()
1217 if l.startswith('\ '):
1217 if l.startswith('\ '):
1218 # XXX: the only way to hit this is with an invalid line range.
1218 # XXX: the only way to hit this is with an invalid line range.
1219 # The no-eol marker is not counted in the line range, but I
1219 # The no-eol marker is not counted in the line range, but I
1220 # guess there are diff(1) implementations out there which behave differently.
1220 # guess there are diff(1) implementations out there which behave differently.
1221 s = self.b[-1][:-1]
1221 s = self.b[-1][:-1]
1222 self.b[-1] = s
1222 self.b[-1] = s
1223 self.hunk[hunki - 1] = s
1223 self.hunk[hunki - 1] = s
1224 continue
1224 continue
1225 if not l:
1225 if not l:
1226 # line deletions, new block is empty and we hit EOF
1226 # line deletions, new block is empty and we hit EOF
1227 lr.push(l)
1227 lr.push(l)
1228 break
1228 break
1229 s = l[2:]
1229 s = l[2:]
1230 if l.startswith('+ ') or l.startswith('! '):
1230 if l.startswith('+ ') or l.startswith('! '):
1231 u = '+' + s
1231 u = '+' + s
1232 elif l.startswith(' '):
1232 elif l.startswith(' '):
1233 u = ' ' + s
1233 u = ' ' + s
1234 elif len(self.b) == 0:
1234 elif len(self.b) == 0:
1235 # line deletions, new block is empty
1235 # line deletions, new block is empty
1236 lr.push(l)
1236 lr.push(l)
1237 break
1237 break
1238 else:
1238 else:
1239 raise PatchError(_("bad hunk #%d old text line %d") %
1239 raise PatchError(_("bad hunk #%d old text line %d") %
1240 (self.number, x))
1240 (self.number, x))
1241 self.b.append(s)
1241 self.b.append(s)
1242 while True:
1242 while True:
1243 if hunki >= len(self.hunk):
1243 if hunki >= len(self.hunk):
1244 h = ""
1244 h = ""
1245 else:
1245 else:
1246 h = self.hunk[hunki]
1246 h = self.hunk[hunki]
1247 hunki += 1
1247 hunki += 1
1248 if h == u:
1248 if h == u:
1249 break
1249 break
1250 elif h.startswith('-'):
1250 elif h.startswith('-'):
1251 continue
1251 continue
1252 else:
1252 else:
1253 self.hunk.insert(hunki - 1, u)
1253 self.hunk.insert(hunki - 1, u)
1254 break
1254 break
1255
1255
1256 if not self.a:
1256 if not self.a:
1257 # this happens when lines were only added to the hunk
1257 # this happens when lines were only added to the hunk
1258 for x in self.hunk:
1258 for x in self.hunk:
1259 if x.startswith('-') or x.startswith(' '):
1259 if x.startswith('-') or x.startswith(' '):
1260 self.a.append(x)
1260 self.a.append(x)
1261 if not self.b:
1261 if not self.b:
1262 # this happens when lines were only deleted from the hunk
1262 # this happens when lines were only deleted from the hunk
1263 for x in self.hunk:
1263 for x in self.hunk:
1264 if x.startswith('+') or x.startswith(' '):
1264 if x.startswith('+') or x.startswith(' '):
1265 self.b.append(x[1:])
1265 self.b.append(x[1:])
1266 # @@ -start,len +start,len @@
1266 # @@ -start,len +start,len @@
1267 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1267 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1268 self.startb, self.lenb)
1268 self.startb, self.lenb)
1269 self.hunk[0] = self.desc
1269 self.hunk[0] = self.desc
1270 self._fixnewline(lr)
1270 self._fixnewline(lr)
1271
1271
1272 def _fixnewline(self, lr):
1272 def _fixnewline(self, lr):
1273 l = lr.readline()
1273 l = lr.readline()
1274 if l.startswith('\ '):
1274 if l.startswith('\ '):
1275 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1275 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1276 else:
1276 else:
1277 lr.push(l)
1277 lr.push(l)
1278
1278
1279 def complete(self):
1279 def complete(self):
1280 return len(self.a) == self.lena and len(self.b) == self.lenb
1280 return len(self.a) == self.lena and len(self.b) == self.lenb
1281
1281
1282 def _fuzzit(self, old, new, fuzz, toponly):
1282 def _fuzzit(self, old, new, fuzz, toponly):
1283 # this removes context lines from the top and bottom of list 'l'. It
1283 # this removes context lines from the top and bottom of list 'l'. It
1284 # checks the hunk to make sure only context lines are removed, and then
1284 # checks the hunk to make sure only context lines are removed, and then
1285 # returns a new shortened list of lines.
1285 # returns a new shortened list of lines.
1286 fuzz = min(fuzz, len(old))
1286 fuzz = min(fuzz, len(old))
1287 if fuzz:
1287 if fuzz:
1288 top = 0
1288 top = 0
1289 bot = 0
1289 bot = 0
1290 hlen = len(self.hunk)
1290 hlen = len(self.hunk)
1291 for x in xrange(hlen - 1):
1291 for x in xrange(hlen - 1):
1292 # the hunk starts with the @@ line, so use x+1
1292 # the hunk starts with the @@ line, so use x+1
1293 if self.hunk[x + 1][0] == ' ':
1293 if self.hunk[x + 1][0] == ' ':
1294 top += 1
1294 top += 1
1295 else:
1295 else:
1296 break
1296 break
1297 if not toponly:
1297 if not toponly:
1298 for x in xrange(hlen - 1):
1298 for x in xrange(hlen - 1):
1299 if self.hunk[hlen - bot - 1][0] == ' ':
1299 if self.hunk[hlen - bot - 1][0] == ' ':
1300 bot += 1
1300 bot += 1
1301 else:
1301 else:
1302 break
1302 break
1303
1303
1304 bot = min(fuzz, bot)
1304 bot = min(fuzz, bot)
1305 top = min(fuzz, top)
1305 top = min(fuzz, top)
1306 return old[top:len(old) - bot], new[top:len(new) - bot], top
1306 return old[top:len(old) - bot], new[top:len(new) - bot], top
1307 return old, new, 0
1307 return old, new, 0
1308
1308
1309 def fuzzit(self, fuzz, toponly):
1309 def fuzzit(self, fuzz, toponly):
1310 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1310 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1311 oldstart = self.starta + top
1311 oldstart = self.starta + top
1312 newstart = self.startb + top
1312 newstart = self.startb + top
1313 # zero length hunk ranges already have their start decremented
1313 # zero length hunk ranges already have their start decremented
1314 if self.lena and oldstart > 0:
1314 if self.lena and oldstart > 0:
1315 oldstart -= 1
1315 oldstart -= 1
1316 if self.lenb and newstart > 0:
1316 if self.lenb and newstart > 0:
1317 newstart -= 1
1317 newstart -= 1
1318 return old, oldstart, new, newstart
1318 return old, oldstart, new, newstart
1319
1319
1320 class binhunk(object):
1320 class binhunk(object):
1321 'A binary patch file.'
1321 'A binary patch file.'
1322 def __init__(self, lr, fname):
1322 def __init__(self, lr, fname):
1323 self.text = None
1323 self.text = None
1324 self.delta = False
1324 self.delta = False
1325 self.hunk = ['GIT binary patch\n']
1325 self.hunk = ['GIT binary patch\n']
1326 self._fname = fname
1326 self._fname = fname
1327 self._read(lr)
1327 self._read(lr)
1328
1328
1329 def complete(self):
1329 def complete(self):
1330 return self.text is not None
1330 return self.text is not None
1331
1331
1332 def new(self, lines):
1332 def new(self, lines):
1333 if self.delta:
1333 if self.delta:
1334 return [applybindelta(self.text, ''.join(lines))]
1334 return [applybindelta(self.text, ''.join(lines))]
1335 return [self.text]
1335 return [self.text]
1336
1336
1337 def _read(self, lr):
1337 def _read(self, lr):
1338 def getline(lr, hunk):
1338 def getline(lr, hunk):
1339 l = lr.readline()
1339 l = lr.readline()
1340 hunk.append(l)
1340 hunk.append(l)
1341 return l.rstrip('\r\n')
1341 return l.rstrip('\r\n')
1342
1342
1343 size = 0
1343 size = 0
1344 while True:
1344 while True:
1345 line = getline(lr, self.hunk)
1345 line = getline(lr, self.hunk)
1346 if not line:
1346 if not line:
1347 raise PatchError(_('could not extract "%s" binary data')
1347 raise PatchError(_('could not extract "%s" binary data')
1348 % self._fname)
1348 % self._fname)
1349 if line.startswith('literal '):
1349 if line.startswith('literal '):
1350 size = int(line[8:].rstrip())
1350 size = int(line[8:].rstrip())
1351 break
1351 break
1352 if line.startswith('delta '):
1352 if line.startswith('delta '):
1353 size = int(line[6:].rstrip())
1353 size = int(line[6:].rstrip())
1354 self.delta = True
1354 self.delta = True
1355 break
1355 break
1356 dec = []
1356 dec = []
1357 line = getline(lr, self.hunk)
1357 line = getline(lr, self.hunk)
1358 while len(line) > 1:
1358 while len(line) > 1:
1359 l = line[0]
1359 l = line[0]
1360 if l <= 'Z' and l >= 'A':
1360 if l <= 'Z' and l >= 'A':
1361 l = ord(l) - ord('A') + 1
1361 l = ord(l) - ord('A') + 1
1362 else:
1362 else:
1363 l = ord(l) - ord('a') + 27
1363 l = ord(l) - ord('a') + 27
1364 try:
1364 try:
1365 dec.append(base85.b85decode(line[1:])[:l])
1365 dec.append(base85.b85decode(line[1:])[:l])
1366 except ValueError, e:
1366 except ValueError, e:
1367 raise PatchError(_('could not decode "%s" binary patch: %s')
1367 raise PatchError(_('could not decode "%s" binary patch: %s')
1368 % (self._fname, str(e)))
1368 % (self._fname, str(e)))
1369 line = getline(lr, self.hunk)
1369 line = getline(lr, self.hunk)
1370 text = zlib.decompress(''.join(dec))
1370 text = zlib.decompress(''.join(dec))
1371 if len(text) != size:
1371 if len(text) != size:
1372 raise PatchError(_('"%s" length is %d bytes, should be %d')
1372 raise PatchError(_('"%s" length is %d bytes, should be %d')
1373 % (self._fname, len(text), size))
1373 % (self._fname, len(text), size))
1374 self.text = text
1374 self.text = text
1375
1375
1376 def parsefilename(str):
1376 def parsefilename(str):
1377 # --- filename \t|space stuff
1377 # --- filename \t|space stuff
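# For instance (illustrative inputs only):
#   parsefilename('--- a/foo.c\t(revision 42)')  -> 'a/foo.c'
#   parsefilename('+++ b/foo.c Mon Jan 1 2015')  -> 'b/foo.c'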
1378 s = str[4:].rstrip('\r\n')
1378 s = str[4:].rstrip('\r\n')
1379 i = s.find('\t')
1379 i = s.find('\t')
1380 if i < 0:
1380 if i < 0:
1381 i = s.find(' ')
1381 i = s.find(' ')
1382 if i < 0:
1382 if i < 0:
1383 return s
1383 return s
1384 return s[:i]
1384 return s[:i]
1385
1385
1386 def parsepatch(originalchunks):
1386 def parsepatch(originalchunks):
1387 """patch -> [] of headers -> [] of hunks """
1387 """patch -> [] of headers -> [] of hunks """
1388 class parser(object):
1388 class parser(object):
1389 """patch parsing state machine"""
1389 """patch parsing state machine"""
1390 def __init__(self):
1390 def __init__(self):
1391 self.fromline = 0
1391 self.fromline = 0
1392 self.toline = 0
1392 self.toline = 0
1393 self.proc = ''
1393 self.proc = ''
1394 self.header = None
1394 self.header = None
1395 self.context = []
1395 self.context = []
1396 self.before = []
1396 self.before = []
1397 self.hunk = []
1397 self.hunk = []
1398 self.headers = []
1398 self.headers = []
1399
1399
1400 def addrange(self, limits):
1400 def addrange(self, limits):
1401 fromstart, fromend, tostart, toend, proc = limits
1401 fromstart, fromend, tostart, toend, proc = limits
1402 self.fromline = int(fromstart)
1402 self.fromline = int(fromstart)
1403 self.toline = int(tostart)
1403 self.toline = int(tostart)
1404 self.proc = proc
1404 self.proc = proc
1405
1405
1406 def addcontext(self, context):
1406 def addcontext(self, context):
1407 if self.hunk:
1407 if self.hunk:
1408 h = recordhunk(self.header, self.fromline, self.toline,
1408 h = recordhunk(self.header, self.fromline, self.toline,
1409 self.proc, self.before, self.hunk, context)
1409 self.proc, self.before, self.hunk, context)
1410 self.header.hunks.append(h)
1410 self.header.hunks.append(h)
1411 self.fromline += len(self.before) + h.removed
1411 self.fromline += len(self.before) + h.removed
1412 self.toline += len(self.before) + h.added
1412 self.toline += len(self.before) + h.added
1413 self.before = []
1413 self.before = []
1414 self.hunk = []
1414 self.hunk = []
1415 self.proc = ''
1415 self.proc = ''
1416 self.context = context
1416 self.context = context
1417
1417
1418 def addhunk(self, hunk):
1418 def addhunk(self, hunk):
1419 if self.context:
1419 if self.context:
1420 self.before = self.context
1420 self.before = self.context
1421 self.context = []
1421 self.context = []
1422 self.hunk = hunk
1422 self.hunk = hunk
1423
1423
1424 def newfile(self, hdr):
1424 def newfile(self, hdr):
1425 self.addcontext([])
1425 self.addcontext([])
1426 h = header(hdr)
1426 h = header(hdr)
1427 self.headers.append(h)
1427 self.headers.append(h)
1428 self.header = h
1428 self.header = h
1429
1429
1430 def addother(self, line):
1430 def addother(self, line):
1431 pass # 'other' lines are ignored
1431 pass # 'other' lines are ignored
1432
1432
1433 def finished(self):
1433 def finished(self):
1434 self.addcontext([])
1434 self.addcontext([])
1435 return self.headers
1435 return self.headers
1436
1436
1437 transitions = {
1437 transitions = {
1438 'file': {'context': addcontext,
1438 'file': {'context': addcontext,
1439 'file': newfile,
1439 'file': newfile,
1440 'hunk': addhunk,
1440 'hunk': addhunk,
1441 'range': addrange},
1441 'range': addrange},
1442 'context': {'file': newfile,
1442 'context': {'file': newfile,
1443 'hunk': addhunk,
1443 'hunk': addhunk,
1444 'range': addrange,
1444 'range': addrange,
1445 'other': addother},
1445 'other': addother},
1446 'hunk': {'context': addcontext,
1446 'hunk': {'context': addcontext,
1447 'file': newfile,
1447 'file': newfile,
1448 'range': addrange},
1448 'range': addrange},
1449 'range': {'context': addcontext,
1449 'range': {'context': addcontext,
1450 'hunk': addhunk},
1450 'hunk': addhunk},
1451 'other': {'other': addother},
1451 'other': {'other': addother},
1452 }
1452 }
1453
1453
1454 p = parser()
1454 p = parser()
1455 fp = cStringIO.StringIO()
1455 fp = cStringIO.StringIO()
1456 fp.write(''.join(originalchunks))
1456 fp.write(''.join(originalchunks))
1457 fp.seek(0)
1457 fp.seek(0)
1458
1458
1459 state = 'context'
1459 state = 'context'
1460 for newstate, data in scanpatch(fp):
1460 for newstate, data in scanpatch(fp):
1461 try:
1461 try:
1462 p.transitions[state][newstate](p, data)
1462 p.transitions[state][newstate](p, data)
1463 except KeyError:
1463 except KeyError:
1464 raise PatchError('unhandled transition: %s -> %s' %
1464 raise PatchError('unhandled transition: %s -> %s' %
1465 (state, newstate))
1465 (state, newstate))
1466 state = newstate
1466 state = newstate
1467 del fp
1467 del fp
1468 return p.finished()
1468 return p.finished()
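# A minimal sketch of driving parsepatch() (assumed context: 'difftext'
# holds git-style diff output, e.g. collected from diff() further below;
# the header API used is an assumption):
#
#   headers = parsepatch([difftext])
#   for h in headers:
#       name = h.filename()          # assuming the usual header API
#       hunks = h.hunks              # recordhunk objects appended above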
1469
1469
1470 def pathtransform(path, strip, prefix):
1470 def pathtransform(path, strip, prefix):
1471 '''turn a path from a patch into a path suitable for the repository
1471 '''turn a path from a patch into a path suitable for the repository
1472
1472
1473 prefix, if not empty, is expected to be normalized with a / at the end.
1473 prefix, if not empty, is expected to be normalized with a / at the end.
1474
1474
1475 Returns (stripped components, path in repository).
1475 Returns (stripped components, path in repository).
1476
1476
1477 >>> pathtransform('a/b/c', 0, '')
1477 >>> pathtransform('a/b/c', 0, '')
1478 ('', 'a/b/c')
1478 ('', 'a/b/c')
1479 >>> pathtransform(' a/b/c ', 0, '')
1479 >>> pathtransform(' a/b/c ', 0, '')
1480 ('', ' a/b/c')
1480 ('', ' a/b/c')
1481 >>> pathtransform(' a/b/c ', 2, '')
1481 >>> pathtransform(' a/b/c ', 2, '')
1482 ('a/b/', 'c')
1482 ('a/b/', 'c')
1483 >>> pathtransform('a/b/c', 0, 'd/e/')
1483 >>> pathtransform('a/b/c', 0, 'd/e/')
1484 ('', 'd/e/a/b/c')
1484 ('', 'd/e/a/b/c')
1485 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1485 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1486 ('a//b/', 'd/e/c')
1486 ('a//b/', 'd/e/c')
1487 >>> pathtransform('a/b/c', 3, '')
1487 >>> pathtransform('a/b/c', 3, '')
1488 Traceback (most recent call last):
1488 Traceback (most recent call last):
1489 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1489 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1490 '''
1490 '''
1491 pathlen = len(path)
1491 pathlen = len(path)
1492 i = 0
1492 i = 0
1493 if strip == 0:
1493 if strip == 0:
1494 return '', prefix + path.rstrip()
1494 return '', prefix + path.rstrip()
1495 count = strip
1495 count = strip
1496 while count > 0:
1496 while count > 0:
1497 i = path.find('/', i)
1497 i = path.find('/', i)
1498 if i == -1:
1498 if i == -1:
1499 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1499 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1500 (count, strip, path))
1500 (count, strip, path))
1501 i += 1
1501 i += 1
1502 # consume '//' in the path
1502 # consume '//' in the path
1503 while i < pathlen - 1 and path[i] == '/':
1503 while i < pathlen - 1 and path[i] == '/':
1504 i += 1
1504 i += 1
1505 count -= 1
1505 count -= 1
1506 return path[:i].lstrip(), prefix + path[i:].rstrip()
1506 return path[:i].lstrip(), prefix + path[i:].rstrip()
1507
1507
1508 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1508 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1509 nulla = afile_orig == "/dev/null"
1509 nulla = afile_orig == "/dev/null"
1510 nullb = bfile_orig == "/dev/null"
1510 nullb = bfile_orig == "/dev/null"
1511 create = nulla and hunk.starta == 0 and hunk.lena == 0
1511 create = nulla and hunk.starta == 0 and hunk.lena == 0
1512 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1512 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1513 abase, afile = pathtransform(afile_orig, strip, prefix)
1513 abase, afile = pathtransform(afile_orig, strip, prefix)
1514 gooda = not nulla and backend.exists(afile)
1514 gooda = not nulla and backend.exists(afile)
1515 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1515 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1516 if afile == bfile:
1516 if afile == bfile:
1517 goodb = gooda
1517 goodb = gooda
1518 else:
1518 else:
1519 goodb = not nullb and backend.exists(bfile)
1519 goodb = not nullb and backend.exists(bfile)
1520 missing = not goodb and not gooda and not create
1520 missing = not goodb and not gooda and not create
1521
1521
1522 # some diff programs apparently produce patches where the afile is
1522 # some diff programs apparently produce patches where the afile is
1523 # not /dev/null, but afile starts with bfile
1523 # not /dev/null, but afile starts with bfile
1524 abasedir = afile[:afile.rfind('/') + 1]
1524 abasedir = afile[:afile.rfind('/') + 1]
1525 bbasedir = bfile[:bfile.rfind('/') + 1]
1525 bbasedir = bfile[:bfile.rfind('/') + 1]
1526 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1526 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1527 and hunk.starta == 0 and hunk.lena == 0):
1527 and hunk.starta == 0 and hunk.lena == 0):
1528 create = True
1528 create = True
1529 missing = False
1529 missing = False
1530
1530
1531 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1531 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1532 # diff is between a file and its backup. In this case, the original
1532 # diff is between a file and its backup. In this case, the original
1533 # file should be patched (see original mpatch code).
1533 # file should be patched (see original mpatch code).
1534 isbackup = (abase == bbase and bfile.startswith(afile))
1534 isbackup = (abase == bbase and bfile.startswith(afile))
1535 fname = None
1535 fname = None
1536 if not missing:
1536 if not missing:
1537 if gooda and goodb:
1537 if gooda and goodb:
1538 if isbackup:
1538 if isbackup:
1539 fname = afile
1539 fname = afile
1540 else:
1540 else:
1541 fname = bfile
1541 fname = bfile
1542 elif gooda:
1542 elif gooda:
1543 fname = afile
1543 fname = afile
1544
1544
1545 if not fname:
1545 if not fname:
1546 if not nullb:
1546 if not nullb:
1547 if isbackup:
1547 if isbackup:
1548 fname = afile
1548 fname = afile
1549 else:
1549 else:
1550 fname = bfile
1550 fname = bfile
1551 elif not nulla:
1551 elif not nulla:
1552 fname = afile
1552 fname = afile
1553 else:
1553 else:
1554 raise PatchError(_("undefined source and destination files"))
1554 raise PatchError(_("undefined source and destination files"))
1555
1555
1556 gp = patchmeta(fname)
1556 gp = patchmeta(fname)
1557 if create:
1557 if create:
1558 gp.op = 'ADD'
1558 gp.op = 'ADD'
1559 elif remove:
1559 elif remove:
1560 gp.op = 'DELETE'
1560 gp.op = 'DELETE'
1561 return gp
1561 return gp
1562
1562
1563 def scanpatch(fp):
1563 def scanpatch(fp):
1564 """like patch.iterhunks, but yield different events
1564 """like patch.iterhunks, but yield different events
1565
1565
1566 - ('file', [header_lines + fromfile + tofile])
1566 - ('file', [header_lines + fromfile + tofile])
1567 - ('context', [context_lines])
1567 - ('context', [context_lines])
1568 - ('hunk', [hunk_lines])
1568 - ('hunk', [hunk_lines])
1569 - ('range', (-start,len, +start,len, proc))
1569 - ('range', (-start,len, +start,len, proc))
1570 """
1570 """
1571 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1571 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1572 lr = linereader(fp)
1572 lr = linereader(fp)
1573
1573
1574 def scanwhile(first, p):
1574 def scanwhile(first, p):
1575 """scan lr while predicate holds"""
1575 """scan lr while predicate holds"""
1576 lines = [first]
1576 lines = [first]
1577 while True:
1577 while True:
1578 line = lr.readline()
1578 line = lr.readline()
1579 if not line:
1579 if not line:
1580 break
1580 break
1581 if p(line):
1581 if p(line):
1582 lines.append(line)
1582 lines.append(line)
1583 else:
1583 else:
1584 lr.push(line)
1584 lr.push(line)
1585 break
1585 break
1586 return lines
1586 return lines
1587
1587
1588 while True:
1588 while True:
1589 line = lr.readline()
1589 line = lr.readline()
1590 if not line:
1590 if not line:
1591 break
1591 break
1592 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1592 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1593 def notheader(line):
1593 def notheader(line):
1594 s = line.split(None, 1)
1594 s = line.split(None, 1)
1595 return not s or s[0] not in ('---', 'diff')
1595 return not s or s[0] not in ('---', 'diff')
1596 header = scanwhile(line, notheader)
1596 header = scanwhile(line, notheader)
1597 fromfile = lr.readline()
1597 fromfile = lr.readline()
1598 if fromfile.startswith('---'):
1598 if fromfile.startswith('---'):
1599 tofile = lr.readline()
1599 tofile = lr.readline()
1600 header += [fromfile, tofile]
1600 header += [fromfile, tofile]
1601 else:
1601 else:
1602 lr.push(fromfile)
1602 lr.push(fromfile)
1603 yield 'file', header
1603 yield 'file', header
1604 elif line[0] == ' ':
1604 elif line[0] == ' ':
1605 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1605 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1606 elif line[0] in '-+':
1606 elif line[0] in '-+':
1607 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1607 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1608 else:
1608 else:
1609 m = lines_re.match(line)
1609 m = lines_re.match(line)
1610 if m:
1610 if m:
1611 yield 'range', m.groups()
1611 yield 'range', m.groups()
1612 else:
1612 else:
1613 yield 'other', line
1613 yield 'other', line
1614
1614
1615 def scangitpatch(lr, firstline):
1615 def scangitpatch(lr, firstline):
1616 """
1616 """
1617 Git patches can emit:
1617 Git patches can emit:
1618 - rename a to b
1618 - rename a to b
1619 - change b
1619 - change b
1620 - copy a to c
1620 - copy a to c
1621 - change c
1621 - change c
1622
1622
1623 We cannot apply this sequence as-is: the renamed 'a' could no longer
1623 We cannot apply this sequence as-is: the renamed 'a' could no longer
1624 be found, since it would have been renamed already. And we cannot copy
1624 be found, since it would have been renamed already. And we cannot copy
1625 from 'b' instead because 'b' would have been changed already. So
1625 from 'b' instead because 'b' would have been changed already. So
1626 we scan the git patch for copy and rename commands so we can
1626 we scan the git patch for copy and rename commands so we can
1627 perform the copies ahead of time.
1627 perform the copies ahead of time.
1628 """
1628 """
1629 pos = 0
1629 pos = 0
1630 try:
1630 try:
1631 pos = lr.fp.tell()
1631 pos = lr.fp.tell()
1632 fp = lr.fp
1632 fp = lr.fp
1633 except IOError:
1633 except IOError:
1634 fp = cStringIO.StringIO(lr.fp.read())
1634 fp = cStringIO.StringIO(lr.fp.read())
1635 gitlr = linereader(fp)
1635 gitlr = linereader(fp)
1636 gitlr.push(firstline)
1636 gitlr.push(firstline)
1637 gitpatches = readgitpatch(gitlr)
1637 gitpatches = readgitpatch(gitlr)
1638 fp.seek(pos)
1638 fp.seek(pos)
1639 return gitpatches
1639 return gitpatches
1640
1640
1641 def iterhunks(fp):
1641 def iterhunks(fp):
1642 """Read a patch and yield the following events:
1642 """Read a patch and yield the following events:
1643 - ("file", afile, bfile, firsthunk): select a new target file.
1643 - ("file", afile, bfile, firsthunk): select a new target file.
1644 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1644 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1645 "file" event.
1645 "file" event.
1646 - ("git", gitchanges): current diff is in git format, gitchanges
1646 - ("git", gitchanges): current diff is in git format, gitchanges
1647 maps filenames to gitpatch records. Unique event.
1647 maps filenames to gitpatch records. Unique event.
1648 """
1648 """
1649 afile = ""
1649 afile = ""
1650 bfile = ""
1650 bfile = ""
1651 state = None
1651 state = None
1652 hunknum = 0
1652 hunknum = 0
1653 emitfile = newfile = False
1653 emitfile = newfile = False
1654 gitpatches = None
1654 gitpatches = None
1655
1655
1656 # our states
1656 # our states
1657 BFILE = 1
1657 BFILE = 1
1658 context = None
1658 context = None
1659 lr = linereader(fp)
1659 lr = linereader(fp)
1660
1660
1661 while True:
1661 while True:
1662 x = lr.readline()
1662 x = lr.readline()
1663 if not x:
1663 if not x:
1664 break
1664 break
1665 if state == BFILE and (
1665 if state == BFILE and (
1666 (not context and x[0] == '@')
1666 (not context and x[0] == '@')
1667 or (context is not False and x.startswith('***************'))
1667 or (context is not False and x.startswith('***************'))
1668 or x.startswith('GIT binary patch')):
1668 or x.startswith('GIT binary patch')):
1669 gp = None
1669 gp = None
1670 if (gitpatches and
1670 if (gitpatches and
1671 gitpatches[-1].ispatching(afile, bfile)):
1671 gitpatches[-1].ispatching(afile, bfile)):
1672 gp = gitpatches.pop()
1672 gp = gitpatches.pop()
1673 if x.startswith('GIT binary patch'):
1673 if x.startswith('GIT binary patch'):
1674 h = binhunk(lr, gp.path)
1674 h = binhunk(lr, gp.path)
1675 else:
1675 else:
1676 if context is None and x.startswith('***************'):
1676 if context is None and x.startswith('***************'):
1677 context = True
1677 context = True
1678 h = hunk(x, hunknum + 1, lr, context)
1678 h = hunk(x, hunknum + 1, lr, context)
1679 hunknum += 1
1679 hunknum += 1
1680 if emitfile:
1680 if emitfile:
1681 emitfile = False
1681 emitfile = False
1682 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1682 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1683 yield 'hunk', h
1683 yield 'hunk', h
1684 elif x.startswith('diff --git a/'):
1684 elif x.startswith('diff --git a/'):
1685 m = gitre.match(x.rstrip(' \r\n'))
1685 m = gitre.match(x.rstrip(' \r\n'))
1686 if not m:
1686 if not m:
1687 continue
1687 continue
1688 if gitpatches is None:
1688 if gitpatches is None:
1689 # scan whole input for git metadata
1689 # scan whole input for git metadata
1690 gitpatches = scangitpatch(lr, x)
1690 gitpatches = scangitpatch(lr, x)
1691 yield 'git', [g.copy() for g in gitpatches
1691 yield 'git', [g.copy() for g in gitpatches
1692 if g.op in ('COPY', 'RENAME')]
1692 if g.op in ('COPY', 'RENAME')]
1693 gitpatches.reverse()
1693 gitpatches.reverse()
1694 afile = 'a/' + m.group(1)
1694 afile = 'a/' + m.group(1)
1695 bfile = 'b/' + m.group(2)
1695 bfile = 'b/' + m.group(2)
1696 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1696 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1697 gp = gitpatches.pop()
1697 gp = gitpatches.pop()
1698 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1698 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1699 if not gitpatches:
1699 if not gitpatches:
1700 raise PatchError(_('failed to synchronize metadata for "%s"')
1700 raise PatchError(_('failed to synchronize metadata for "%s"')
1701 % afile[2:])
1701 % afile[2:])
1702 gp = gitpatches[-1]
1702 gp = gitpatches[-1]
1703 newfile = True
1703 newfile = True
1704 elif x.startswith('---'):
1704 elif x.startswith('---'):
1705 # check for a unified diff
1705 # check for a unified diff
1706 l2 = lr.readline()
1706 l2 = lr.readline()
1707 if not l2.startswith('+++'):
1707 if not l2.startswith('+++'):
1708 lr.push(l2)
1708 lr.push(l2)
1709 continue
1709 continue
1710 newfile = True
1710 newfile = True
1711 context = False
1711 context = False
1712 afile = parsefilename(x)
1712 afile = parsefilename(x)
1713 bfile = parsefilename(l2)
1713 bfile = parsefilename(l2)
1714 elif x.startswith('***'):
1714 elif x.startswith('***'):
1715 # check for a context diff
1715 # check for a context diff
1716 l2 = lr.readline()
1716 l2 = lr.readline()
1717 if not l2.startswith('---'):
1717 if not l2.startswith('---'):
1718 lr.push(l2)
1718 lr.push(l2)
1719 continue
1719 continue
1720 l3 = lr.readline()
1720 l3 = lr.readline()
1721 lr.push(l3)
1721 lr.push(l3)
1722 if not l3.startswith("***************"):
1722 if not l3.startswith("***************"):
1723 lr.push(l2)
1723 lr.push(l2)
1724 continue
1724 continue
1725 newfile = True
1725 newfile = True
1726 context = True
1726 context = True
1727 afile = parsefilename(x)
1727 afile = parsefilename(x)
1728 bfile = parsefilename(l2)
1728 bfile = parsefilename(l2)
1729
1729
1730 if newfile:
1730 if newfile:
1731 newfile = False
1731 newfile = False
1732 emitfile = True
1732 emitfile = True
1733 state = BFILE
1733 state = BFILE
1734 hunknum = 0
1734 hunknum = 0
1735
1735
1736 while gitpatches:
1736 while gitpatches:
1737 gp = gitpatches.pop()
1737 gp = gitpatches.pop()
1738 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1738 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1739
1739
1740 def applybindelta(binchunk, data):
1740 def applybindelta(binchunk, data):
1741 """Apply a binary delta hunk
1741 """Apply a binary delta hunk
1742 The algorithm used is the algorithm from git's patch-delta.c
1742 The algorithm used is the algorithm from git's patch-delta.c
1743 """
1743 """
1744 def deltahead(binchunk):
1744 def deltahead(binchunk):
1745 i = 0
1745 i = 0
1746 for c in binchunk:
1746 for c in binchunk:
1747 i += 1
1747 i += 1
1748 if not (ord(c) & 0x80):
1748 if not (ord(c) & 0x80):
1749 return i
1749 return i
1750 return i
1750 return i
1751 out = ""
1751 out = ""
1752 s = deltahead(binchunk)
1752 s = deltahead(binchunk)
1753 binchunk = binchunk[s:]
1753 binchunk = binchunk[s:]
1754 s = deltahead(binchunk)
1754 s = deltahead(binchunk)
1755 binchunk = binchunk[s:]
1755 binchunk = binchunk[s:]
1756 i = 0
1756 i = 0
1757 while i < len(binchunk):
1757 while i < len(binchunk):
1758 cmd = ord(binchunk[i])
1758 cmd = ord(binchunk[i])
1759 i += 1
1759 i += 1
1760 if (cmd & 0x80):
1760 if (cmd & 0x80):
1761 offset = 0
1761 offset = 0
1762 size = 0
1762 size = 0
1763 if (cmd & 0x01):
1763 if (cmd & 0x01):
1764 offset = ord(binchunk[i])
1764 offset = ord(binchunk[i])
1765 i += 1
1765 i += 1
1766 if (cmd & 0x02):
1766 if (cmd & 0x02):
1767 offset |= ord(binchunk[i]) << 8
1767 offset |= ord(binchunk[i]) << 8
1768 i += 1
1768 i += 1
1769 if (cmd & 0x04):
1769 if (cmd & 0x04):
1770 offset |= ord(binchunk[i]) << 16
1770 offset |= ord(binchunk[i]) << 16
1771 i += 1
1771 i += 1
1772 if (cmd & 0x08):
1772 if (cmd & 0x08):
1773 offset |= ord(binchunk[i]) << 24
1773 offset |= ord(binchunk[i]) << 24
1774 i += 1
1774 i += 1
1775 if (cmd & 0x10):
1775 if (cmd & 0x10):
1776 size = ord(binchunk[i])
1776 size = ord(binchunk[i])
1777 i += 1
1777 i += 1
1778 if (cmd & 0x20):
1778 if (cmd & 0x20):
1779 size |= ord(binchunk[i]) << 8
1779 size |= ord(binchunk[i]) << 8
1780 i += 1
1780 i += 1
1781 if (cmd & 0x40):
1781 if (cmd & 0x40):
1782 size |= ord(binchunk[i]) << 16
1782 size |= ord(binchunk[i]) << 16
1783 i += 1
1783 i += 1
1784 if size == 0:
1784 if size == 0:
1785 size = 0x10000
1785 size = 0x10000
1786 offset_end = offset + size
1786 offset_end = offset + size
1787 out += data[offset:offset_end]
1787 out += data[offset:offset_end]
1788 elif cmd != 0:
1788 elif cmd != 0:
1789 offset_end = i + cmd
1789 offset_end = i + cmd
1790 out += binchunk[i:offset_end]
1790 out += binchunk[i:offset_end]
1791 i += cmd
1791 i += cmd
1792 else:
1792 else:
1793 raise PatchError(_('unexpected delta opcode 0'))
1793 raise PatchError(_('unexpected delta opcode 0'))
1794 return out
1794 return out
1795
1795
1796 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1796 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1797 """Reads a patch from fp and tries to apply it.
1797 """Reads a patch from fp and tries to apply it.
1798
1798
1799 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1799 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1800 there was any fuzz.
1800 there was any fuzz.
1801
1801
1802 If 'eolmode' is 'strict', the patch content and patched file are
1802 If 'eolmode' is 'strict', the patch content and patched file are
1803 read in binary mode. Otherwise, line endings are ignored when
1803 read in binary mode. Otherwise, line endings are ignored when
1804 patching, then normalized according to 'eolmode'.
1804 patching, then normalized according to 'eolmode'.
1805 """
1805 """
1806 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1806 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1807 prefix=prefix, eolmode=eolmode)
1807 prefix=prefix, eolmode=eolmode)
1808
1808
1809 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1809 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1810 eolmode='strict'):
1810 eolmode='strict'):
1811
1811
1812 if prefix:
1812 if prefix:
1813 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1813 prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
1814 prefix)
1814 prefix)
1815 if prefix != '':
1815 if prefix != '':
1816 prefix += '/'
1816 prefix += '/'
1817 def pstrip(p):
1817 def pstrip(p):
1818 return pathtransform(p, strip - 1, prefix)[1]
1818 return pathtransform(p, strip - 1, prefix)[1]
1819
1819
1820 rejects = 0
1820 rejects = 0
1821 err = 0
1821 err = 0
1822 current_file = None
1822 current_file = None
1823
1823
1824 for state, values in iterhunks(fp):
1824 for state, values in iterhunks(fp):
1825 if state == 'hunk':
1825 if state == 'hunk':
1826 if not current_file:
1826 if not current_file:
1827 continue
1827 continue
1828 ret = current_file.apply(values)
1828 ret = current_file.apply(values)
1829 if ret > 0:
1829 if ret > 0:
1830 err = 1
1830 err = 1
1831 elif state == 'file':
1831 elif state == 'file':
1832 if current_file:
1832 if current_file:
1833 rejects += current_file.close()
1833 rejects += current_file.close()
1834 current_file = None
1834 current_file = None
1835 afile, bfile, first_hunk, gp = values
1835 afile, bfile, first_hunk, gp = values
1836 if gp:
1836 if gp:
1837 gp.path = pstrip(gp.path)
1837 gp.path = pstrip(gp.path)
1838 if gp.oldpath:
1838 if gp.oldpath:
1839 gp.oldpath = pstrip(gp.oldpath)
1839 gp.oldpath = pstrip(gp.oldpath)
1840 else:
1840 else:
1841 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1841 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1842 prefix)
1842 prefix)
1843 if gp.op == 'RENAME':
1843 if gp.op == 'RENAME':
1844 backend.unlink(gp.oldpath)
1844 backend.unlink(gp.oldpath)
1845 if not first_hunk:
1845 if not first_hunk:
1846 if gp.op == 'DELETE':
1846 if gp.op == 'DELETE':
1847 backend.unlink(gp.path)
1847 backend.unlink(gp.path)
1848 continue
1848 continue
1849 data, mode = None, None
1849 data, mode = None, None
1850 if gp.op in ('RENAME', 'COPY'):
1850 if gp.op in ('RENAME', 'COPY'):
1851 data, mode = store.getfile(gp.oldpath)[:2]
1851 data, mode = store.getfile(gp.oldpath)[:2]
1852 # FIXME: failing getfile has never been handled here
1852 # FIXME: failing getfile has never been handled here
1853 assert data is not None
1853 assert data is not None
1854 if gp.mode:
1854 if gp.mode:
1855 mode = gp.mode
1855 mode = gp.mode
1856 if gp.op == 'ADD':
1856 if gp.op == 'ADD':
1857 # Added files without content have no hunk and
1857 # Added files without content have no hunk and
1858 # must be created
1858 # must be created
1859 data = ''
1859 data = ''
1860 if data or mode:
1860 if data or mode:
1861 if (gp.op in ('ADD', 'RENAME', 'COPY')
1861 if (gp.op in ('ADD', 'RENAME', 'COPY')
1862 and backend.exists(gp.path)):
1862 and backend.exists(gp.path)):
1863 raise PatchError(_("cannot create %s: destination "
1863 raise PatchError(_("cannot create %s: destination "
1864 "already exists") % gp.path)
1864 "already exists") % gp.path)
1865 backend.setfile(gp.path, data, mode, gp.oldpath)
1865 backend.setfile(gp.path, data, mode, gp.oldpath)
1866 continue
1866 continue
1867 try:
1867 try:
1868 current_file = patcher(ui, gp, backend, store,
1868 current_file = patcher(ui, gp, backend, store,
1869 eolmode=eolmode)
1869 eolmode=eolmode)
1870 except PatchError, inst:
1870 except PatchError, inst:
1871 ui.warn(str(inst) + '\n')
1871 ui.warn(str(inst) + '\n')
1872 current_file = None
1872 current_file = None
1873 rejects += 1
1873 rejects += 1
1874 continue
1874 continue
1875 elif state == 'git':
1875 elif state == 'git':
1876 for gp in values:
1876 for gp in values:
1877 path = pstrip(gp.oldpath)
1877 path = pstrip(gp.oldpath)
1878 data, mode = backend.getfile(path)
1878 data, mode = backend.getfile(path)
1879 if data is None:
1879 if data is None:
1880 # The error ignored here will trigger a getfile()
1880 # The error ignored here will trigger a getfile()
1881 # error in a place more appropriate for error
1881 # error in a place more appropriate for error
1882 # handling, and will not interrupt the patching
1882 # handling, and will not interrupt the patching
1883 # process.
1883 # process.
1884 pass
1884 pass
1885 else:
1885 else:
1886 store.setfile(path, data, mode)
1886 store.setfile(path, data, mode)
1887 else:
1887 else:
1888 raise util.Abort(_('unsupported parser state: %s') % state)
1888 raise util.Abort(_('unsupported parser state: %s') % state)
1889
1889
1890 if current_file:
1890 if current_file:
1891 rejects += current_file.close()
1891 rejects += current_file.close()
1892
1892
1893 if rejects:
1893 if rejects:
1894 return -1
1894 return -1
1895 return err
1895 return err
1896
1896
1897 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1897 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1898 similarity):
1898 similarity):
1899 """use <patcher> to apply <patchname> to the working directory.
1899 """use <patcher> to apply <patchname> to the working directory.
1900 returns whether patch was applied with fuzz factor."""
1900 returns whether patch was applied with fuzz factor."""
1901
1901
1902 fuzz = False
1902 fuzz = False
1903 args = []
1903 args = []
1904 cwd = repo.root
1904 cwd = repo.root
1905 if cwd:
1905 if cwd:
1906 args.append('-d %s' % util.shellquote(cwd))
1906 args.append('-d %s' % util.shellquote(cwd))
1907 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1907 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1908 util.shellquote(patchname)))
1908 util.shellquote(patchname)))
1909 try:
1909 try:
1910 for line in fp:
1910 for line in fp:
1911 line = line.rstrip()
1911 line = line.rstrip()
1912 ui.note(line + '\n')
1912 ui.note(line + '\n')
1913 if line.startswith('patching file '):
1913 if line.startswith('patching file '):
1914 pf = util.parsepatchoutput(line)
1914 pf = util.parsepatchoutput(line)
1915 printed_file = False
1915 printed_file = False
1916 files.add(pf)
1916 files.add(pf)
1917 elif line.find('with fuzz') >= 0:
1917 elif line.find('with fuzz') >= 0:
1918 fuzz = True
1918 fuzz = True
1919 if not printed_file:
1919 if not printed_file:
1920 ui.warn(pf + '\n')
1920 ui.warn(pf + '\n')
1921 printed_file = True
1921 printed_file = True
1922 ui.warn(line + '\n')
1922 ui.warn(line + '\n')
1923 elif line.find('saving rejects to file') >= 0:
1923 elif line.find('saving rejects to file') >= 0:
1924 ui.warn(line + '\n')
1924 ui.warn(line + '\n')
1925 elif line.find('FAILED') >= 0:
1925 elif line.find('FAILED') >= 0:
1926 if not printed_file:
1926 if not printed_file:
1927 ui.warn(pf + '\n')
1927 ui.warn(pf + '\n')
1928 printed_file = True
1928 printed_file = True
1929 ui.warn(line + '\n')
1929 ui.warn(line + '\n')
1930 finally:
1930 finally:
1931 if files:
1931 if files:
1932 scmutil.marktouched(repo, files, similarity)
1932 scmutil.marktouched(repo, files, similarity)
1933 code = fp.close()
1933 code = fp.close()
1934 if code:
1934 if code:
1935 raise PatchError(_("patch command failed: %s") %
1935 raise PatchError(_("patch command failed: %s") %
1936 util.explainexit(code)[0])
1936 util.explainexit(code)[0])
1937 return fuzz
1937 return fuzz
1938
1938
1939 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1939 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1940 eolmode='strict'):
1940 eolmode='strict'):
1941 if files is None:
1941 if files is None:
1942 files = set()
1942 files = set()
1943 if eolmode is None:
1943 if eolmode is None:
1944 eolmode = ui.config('patch', 'eol', 'strict')
1944 eolmode = ui.config('patch', 'eol', 'strict')
1945 if eolmode.lower() not in eolmodes:
1945 if eolmode.lower() not in eolmodes:
1946 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1946 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1947 eolmode = eolmode.lower()
1947 eolmode = eolmode.lower()
1948
1948
1949 store = filestore()
1949 store = filestore()
1950 try:
1950 try:
1951 fp = open(patchobj, 'rb')
1951 fp = open(patchobj, 'rb')
1952 except TypeError:
1952 except TypeError:
1953 fp = patchobj
1953 fp = patchobj
1954 try:
1954 try:
1955 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1955 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1956 eolmode=eolmode)
1956 eolmode=eolmode)
1957 finally:
1957 finally:
1958 if fp != patchobj:
1958 if fp != patchobj:
1959 fp.close()
1959 fp.close()
1960 files.update(backend.close())
1960 files.update(backend.close())
1961 store.close()
1961 store.close()
1962 if ret < 0:
1962 if ret < 0:
1963 raise PatchError(_('patch failed to apply'))
1963 raise PatchError(_('patch failed to apply'))
1964 return ret > 0
1964 return ret > 0
1965
1965
1966 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1966 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1967 eolmode='strict', similarity=0):
1967 eolmode='strict', similarity=0):
1968 """use builtin patch to apply <patchobj> to the working directory.
1968 """use builtin patch to apply <patchobj> to the working directory.
1969 returns whether patch was applied with fuzz factor."""
1969 returns whether patch was applied with fuzz factor."""
1970 backend = workingbackend(ui, repo, similarity)
1970 backend = workingbackend(ui, repo, similarity)
1971 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1971 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1972
1972
1973 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1973 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1974 eolmode='strict'):
1974 eolmode='strict'):
1975 backend = repobackend(ui, repo, ctx, store)
1975 backend = repobackend(ui, repo, ctx, store)
1976 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1976 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1977
1977
1978 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1978 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1979 similarity=0):
1979 similarity=0):
1980 """Apply <patchname> to the working directory.
1980 """Apply <patchname> to the working directory.
1981
1981
1982 'eolmode' specifies how line endings should be handled. It can be:
1982 'eolmode' specifies how line endings should be handled. It can be:
1983 - 'strict': inputs are read in binary mode, EOLs are preserved
1983 - 'strict': inputs are read in binary mode, EOLs are preserved
1984 - 'crlf': EOLs are ignored when patching and reset to CRLF
1984 - 'crlf': EOLs are ignored when patching and reset to CRLF
1985 - 'lf': EOLs are ignored when patching and reset to LF
1985 - 'lf': EOLs are ignored when patching and reset to LF
1986 - None: get it from user settings, default to 'strict'
1986 - None: get it from user settings, default to 'strict'
1987 'eolmode' is ignored when using an external patcher program.
1987 'eolmode' is ignored when using an external patcher program.
1988
1988
1989 Returns whether patch was applied with fuzz factor.
1989 Returns whether patch was applied with fuzz factor.
1990 """
1990 """
1991 patcher = ui.config('ui', 'patch')
1991 patcher = ui.config('ui', 'patch')
1992 if files is None:
1992 if files is None:
1993 files = set()
1993 files = set()
1994 if patcher:
1994 if patcher:
1995 return _externalpatch(ui, repo, patcher, patchname, strip,
1995 return _externalpatch(ui, repo, patcher, patchname, strip,
1996 files, similarity)
1996 files, similarity)
1997 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1997 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1998 similarity)
1998 similarity)
1999
1999
2000 def changedfiles(ui, repo, patchpath, strip=1):
2000 def changedfiles(ui, repo, patchpath, strip=1):
2001 backend = fsbackend(ui, repo.root)
2001 backend = fsbackend(ui, repo.root)
2002 fp = open(patchpath, 'rb')
2002 fp = open(patchpath, 'rb')
2003 try:
2003 try:
2004 changed = set()
2004 changed = set()
2005 for state, values in iterhunks(fp):
2005 for state, values in iterhunks(fp):
2006 if state == 'file':
2006 if state == 'file':
2007 afile, bfile, first_hunk, gp = values
2007 afile, bfile, first_hunk, gp = values
2008 if gp:
2008 if gp:
2009 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2009 gp.path = pathtransform(gp.path, strip - 1, '')[1]
2010 if gp.oldpath:
2010 if gp.oldpath:
2011 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2011 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
2012 else:
2012 else:
2013 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2013 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2014 '')
2014 '')
2015 changed.add(gp.path)
2015 changed.add(gp.path)
2016 if gp.op == 'RENAME':
2016 if gp.op == 'RENAME':
2017 changed.add(gp.oldpath)
2017 changed.add(gp.oldpath)
2018 elif state not in ('hunk', 'git'):
2018 elif state not in ('hunk', 'git'):
2019 raise util.Abort(_('unsupported parser state: %s') % state)
2019 raise util.Abort(_('unsupported parser state: %s') % state)
2020 return changed
2020 return changed
2021 finally:
2021 finally:
2022 fp.close()
2022 fp.close()
2023
2023
2024 class GitDiffRequired(Exception):
2024 class GitDiffRequired(Exception):
2025 pass
2025 pass
2026
2026
2027 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2027 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2028 '''return diffopts with all features supported and parsed'''
2028 '''return diffopts with all features supported and parsed'''
2029 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2029 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2030 git=True, whitespace=True, formatchanging=True)
2030 git=True, whitespace=True, formatchanging=True)
2031
2031
2032 diffopts = diffallopts
2032 diffopts = diffallopts
2033
2033
2034 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2034 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2035 whitespace=False, formatchanging=False):
2035 whitespace=False, formatchanging=False):
2036 '''return diffopts with only opted-in features parsed
2036 '''return diffopts with only opted-in features parsed
2037
2037
2038 Features:
2038 Features:
2039 - git: git-style diffs
2039 - git: git-style diffs
2040 - whitespace: whitespace options like ignoreblanklines and ignorews
2040 - whitespace: whitespace options like ignoreblanklines and ignorews
2041 - formatchanging: options that will likely break or cause correctness issues
2041 - formatchanging: options that will likely break or cause correctness issues
2042 with most diff parsers
2042 with most diff parsers
2043 '''
2043 '''
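# Sketch of the opt-in split (compare diffallopts() above, which enables
# every feature; 'opts' is a command-option dict or None):
#
#   safe = difffeatureopts(ui, opts, git=True)
#   full = difffeatureopts(ui, opts, git=True, whitespace=True,
#                          formatchanging=True)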
2044 def get(key, name=None, getter=ui.configbool, forceplain=None):
2044 def get(key, name=None, getter=ui.configbool, forceplain=None):
2045 if opts:
2045 if opts:
2046 v = opts.get(key)
2046 v = opts.get(key)
2047 if v:
2047 if v:
2048 return v
2048 return v
2049 if forceplain is not None and ui.plain():
2049 if forceplain is not None and ui.plain():
2050 return forceplain
2050 return forceplain
2051 return getter(section, name or key, None, untrusted=untrusted)
2051 return getter(section, name or key, None, untrusted=untrusted)
2052
2052
2053 # core options, expected to be understood by every diff parser
2053 # core options, expected to be understood by every diff parser
2054 buildopts = {
2054 buildopts = {
2055 'nodates': get('nodates'),
2055 'nodates': get('nodates'),
2056 'showfunc': get('show_function', 'showfunc'),
2056 'showfunc': get('show_function', 'showfunc'),
2057 'context': get('unified', getter=ui.config),
2057 'context': get('unified', getter=ui.config),
2058 }
2058 }
2059
2059
2060 if git:
2060 if git:
2061 buildopts['git'] = get('git')
2061 buildopts['git'] = get('git')
2062 if whitespace:
2062 if whitespace:
2063 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2063 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2064 buildopts['ignorewsamount'] = get('ignore_space_change',
2064 buildopts['ignorewsamount'] = get('ignore_space_change',
2065 'ignorewsamount')
2065 'ignorewsamount')
2066 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2066 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2067 'ignoreblanklines')
2067 'ignoreblanklines')
2068 if formatchanging:
2068 if formatchanging:
2069 buildopts['text'] = opts and opts.get('text')
2069 buildopts['text'] = opts and opts.get('text')
2070 buildopts['nobinary'] = get('nobinary')
2070 buildopts['nobinary'] = get('nobinary')
2071 buildopts['noprefix'] = get('noprefix', forceplain=False)
2071 buildopts['noprefix'] = get('noprefix', forceplain=False)
2072
2072
2073 return mdiff.diffopts(**buildopts)
2073 return mdiff.diffopts(**buildopts)
2074
2074
2075 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2075 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2076 losedatafn=None, prefix='', relroot=''):
2076 losedatafn=None, prefix='', relroot=''):
2077 '''yields diff of changes to files between two nodes, or between a
2077 '''yields diff of changes to files between two nodes, or between a
2078 node and the working directory.
2078 node and the working directory.
2079
2079
2080 if node1 is None, use first dirstate parent instead.
2080 if node1 is None, use first dirstate parent instead.
2081 if node2 is None, compare node1 with working directory.
2081 if node2 is None, compare node1 with working directory.
2082
2082
2083 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2083 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2084 every time some change cannot be represented with the current
2084 every time some change cannot be represented with the current
2085 patch format. Return False to upgrade to git patch format, True to
2085 patch format. Return False to upgrade to git patch format, True to
2086 accept the loss or raise an exception to abort the diff. It is
2086 accept the loss or raise an exception to abort the diff. It is
2087 called with the name of the current file being diffed as 'fn'. If set
2087 called with the name of the current file being diffed as 'fn'. If set
2088 to None, patches will always be upgraded to git format when
2088 to None, patches will always be upgraded to git format when
2089 necessary.
2089 necessary.
2090
2090
2091 prefix is a filename prefix that is prepended to all filenames on
2091 prefix is a filename prefix that is prepended to all filenames on
2092 display (used for subrepos).
2092 display (used for subrepos).
2093
2093
2094 relroot, if not empty, must be normalized with a trailing /. Any match
2094 relroot, if not empty, must be normalized with a trailing /. Any match
2095 patterns that fall outside it will be ignored.'''
2095 patterns that fall outside it will be ignored.'''
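# A minimal rendering sketch (assumed context: 'repo' is a localrepository
# and node1/node2 identify the revisions to compare):
#
#   for chunk in diff(repo, node1, node2, opts=diffallopts(repo.ui)):
#       repo.ui.write(chunk)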
2096
2096
2097 if opts is None:
2097 if opts is None:
2098 opts = mdiff.defaultopts
2098 opts = mdiff.defaultopts
2099
2099
2100 if not node1 and not node2:
2100 if not node1 and not node2:
2101 node1 = repo.dirstate.p1()
2101 node1 = repo.dirstate.p1()
2102
2102
2103 def lrugetfilectx():
2103 def lrugetfilectx():
2104 cache = {}
2104 cache = {}
2105 order = collections.deque()
2105 order = collections.deque()
2106 def getfilectx(f, ctx):
2106 def getfilectx(f, ctx):
2107 fctx = ctx.filectx(f, filelog=cache.get(f))
2107 fctx = ctx.filectx(f, filelog=cache.get(f))
2108 if f not in cache:
2108 if f not in cache:
2109 if len(cache) > 20:
2109 if len(cache) > 20:
2110 del cache[order.popleft()]
2110 del cache[order.popleft()]
2111 cache[f] = fctx.filelog()
2111 cache[f] = fctx.filelog()
2112 else:
2112 else:
2113 order.remove(f)
2113 order.remove(f)
2114 order.append(f)
2114 order.append(f)
2115 return fctx
2115 return fctx
2116 return getfilectx
2116 return getfilectx
2117 getfilectx = lrugetfilectx()
2117 getfilectx = lrugetfilectx()
2118
2118
2119 ctx1 = repo[node1]
2119 ctx1 = repo[node1]
2120 ctx2 = repo[node2]
2120 ctx2 = repo[node2]
2121
2121
2122 relfiltered = False
2122 relfiltered = False
2123 if relroot != '' and match.always():
2123 if relroot != '' and match.always():
2124 # as a special case, create a new matcher with just the relroot
2124 # as a special case, create a new matcher with just the relroot
2125 pats = [relroot]
2125 pats = [relroot]
2126 match = scmutil.match(ctx2, pats, default='path')
2126 match = scmutil.match(ctx2, pats, default='path')
2127 relfiltered = True
2127 relfiltered = True
2128
2128
2129 if not changes:
2129 if not changes:
2130 changes = repo.status(ctx1, ctx2, match=match)
2130 changes = repo.status(ctx1, ctx2, match=match)
2131 modified, added, removed = changes[:3]
2131 modified, added, removed = changes[:3]
2132
2132
2133 if not modified and not added and not removed:
2133 if not modified and not added and not removed:
2134 return []
2134 return []
2135
2135
2136 if repo.ui.debugflag:
2136 if repo.ui.debugflag:
2137 hexfunc = hex
2137 hexfunc = hex
2138 else:
2138 else:
2139 hexfunc = short
2139 hexfunc = short
2140 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2140 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2141
2141
2142 copy = {}
2142 copy = {}
2143 if opts.git or opts.upgrade:
2143 if opts.git or opts.upgrade:
2144 copy = copies.pathcopies(ctx1, ctx2, match=match)
2144 copy = copies.pathcopies(ctx1, ctx2, match=match)
2145
2145
2146 if relroot is not None:
2146 if relroot is not None:
2147 if not relfiltered:
2147 if not relfiltered:
2148 # XXX this would ideally be done in the matcher, but that is
2148 # XXX this would ideally be done in the matcher, but that is
2149 # generally meant to 'or' patterns, not 'and' them. In this case we
2149 # generally meant to 'or' patterns, not 'and' them. In this case we
2150 # need to 'and' all the patterns from the matcher with relroot.
2150 # need to 'and' all the patterns from the matcher with relroot.
2151 def filterrel(l):
2151 def filterrel(l):
2152 return [f for f in l if f.startswith(relroot)]
2152 return [f for f in l if f.startswith(relroot)]
2153 modified = filterrel(modified)
2153 modified = filterrel(modified)
2154 added = filterrel(added)
2154 added = filterrel(added)
2155 removed = filterrel(removed)
2155 removed = filterrel(removed)
2156 relfiltered = True
2156 relfiltered = True
2157 # filter out copies where either side isn't inside the relative root
2157 # filter out copies where either side isn't inside the relative root
2158 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2158 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2159 if dst.startswith(relroot)
2159 if dst.startswith(relroot)
2160 and src.startswith(relroot)))
2160 and src.startswith(relroot)))
2161
2161
2162 def difffn(opts, losedata):
2162 def difffn(opts, losedata):
2163 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2163 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2164 copy, getfilectx, opts, losedata, prefix, relroot)
2164 copy, getfilectx, opts, losedata, prefix, relroot)
2165 if opts.upgrade and not opts.git:
2165 if opts.upgrade and not opts.git:
2166 try:
2166 try:
2167 def losedata(fn):
2167 def losedata(fn):
2168 if not losedatafn or not losedatafn(fn=fn):
2168 if not losedatafn or not losedatafn(fn=fn):
2169 raise GitDiffRequired
2169 raise GitDiffRequired
2170 # Buffer the whole output until we are sure it can be generated
2170 # Buffer the whole output until we are sure it can be generated
2171 return list(difffn(opts.copy(git=False), losedata))
2171 return list(difffn(opts.copy(git=False), losedata))
2172 except GitDiffRequired:
2172 except GitDiffRequired:
2173 return difffn(opts.copy(git=True), None)
2173 return difffn(opts.copy(git=True), None)
2174 else:
2174 else:
2175 return difffn(opts, None)
2175 return difffn(opts, None)
2176
2176
2177 def difflabel(func, *args, **kw):
2177 def difflabel(func, *args, **kw):
2178 '''yields 2-tuples of (output, label) based on the output of func()'''
2178 '''yields 2-tuples of (output, label) based on the output of func()'''
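# e.g. (hypothetical): colorized output via the labels yielded here:
#
#   for output, label in difflabel(diff, repo, node1, node2):
#       ui.write(output, label=label)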
2179 headprefixes = [('diff', 'diff.diffline'),
2179 headprefixes = [('diff', 'diff.diffline'),
2180 ('copy', 'diff.extended'),
2180 ('copy', 'diff.extended'),
2181 ('rename', 'diff.extended'),
2181 ('rename', 'diff.extended'),
2182 ('old', 'diff.extended'),
2182 ('old', 'diff.extended'),
2183 ('new', 'diff.extended'),
2183 ('new', 'diff.extended'),
2184 ('deleted', 'diff.extended'),
2184 ('deleted', 'diff.extended'),
2185 ('---', 'diff.file_a'),
2185 ('---', 'diff.file_a'),
2186 ('+++', 'diff.file_b')]
2186 ('+++', 'diff.file_b')]
2187 textprefixes = [('@', 'diff.hunk'),
2187 textprefixes = [('@', 'diff.hunk'),
2188 ('-', 'diff.deleted'),
2188 ('-', 'diff.deleted'),
2189 ('+', 'diff.inserted')]
2189 ('+', 'diff.inserted')]
2190 head = False
2190 head = False
2191 for chunk in func(*args, **kw):
2191 for chunk in func(*args, **kw):
2192 lines = chunk.split('\n')
2192 lines = chunk.split('\n')
2193 for i, line in enumerate(lines):
2193 for i, line in enumerate(lines):
2194 if i != 0:
2194 if i != 0:
2195 yield ('\n', '')
2195 yield ('\n', '')
2196 if head:
2196 if head:
2197 if line.startswith('@'):
2197 if line.startswith('@'):
2198 head = False
2198 head = False
2199 else:
2199 else:
2200 if line and line[0] not in ' +-@\\':
2200 if line and line[0] not in ' +-@\\':
2201 head = True
2201 head = True
2202 stripline = line
2202 stripline = line
2203 diffline = False
2203 diffline = False
2204 if not head and line and line[0] in '+-':
2204 if not head and line and line[0] in '+-':
2205 # highlight tabs and trailing whitespace, but only in
2205 # highlight tabs and trailing whitespace, but only in
2206 # changed lines
2206 # changed lines
2207 stripline = line.rstrip()
2207 stripline = line.rstrip()
2208 diffline = True
2208 diffline = True
2209
2209
2210 prefixes = textprefixes
2210 prefixes = textprefixes
2211 if head:
2211 if head:
2212 prefixes = headprefixes
2212 prefixes = headprefixes
2213 for prefix, label in prefixes:
2213 for prefix, label in prefixes:
2214 if stripline.startswith(prefix):
2214 if stripline.startswith(prefix):
2215 if diffline:
2215 if diffline:
2216 for token in tabsplitter.findall(stripline):
2216 for token in tabsplitter.findall(stripline):
2217 if '\t' == token[0]:
2217 if '\t' == token[0]:
2218 yield (token, 'diff.tab')
2218 yield (token, 'diff.tab')
2219 else:
2219 else:
2220 yield (token, label)
2220 yield (token, label)
2221 else:
2221 else:
2222 yield (stripline, label)
2222 yield (stripline, label)
2223 break
2223 break
2224 else:
2224 else:
2225 yield (line, '')
2225 yield (line, '')
2226 if line != stripline:
2226 if line != stripline:
2227 yield (line[len(stripline):], 'diff.trailingwhitespace')
2227 yield (line[len(stripline):], 'diff.trailingwhitespace')
2228
2228
2229 def diffui(*args, **kw):
2229 def diffui(*args, **kw):
2230 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2230 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2231 return difflabel(diff, *args, **kw)
2231 return difflabel(diff, *args, **kw)
2232
2232
def _filepairs(ctx1, modified, added, removed, copy, opts):
    '''generates tuples (f1, f2, copyop), where f1 is the name of the file
    before and f2 is the name after. For added files, f1 will be None,
    and for removed files, f2 will be None. copyop may be set to None, 'copy'
    or 'rename' (the latter two only if opts.git is set).'''
    gone = set()

    copyto = dict([(v, k) for k, v in copy.items()])

    addedset, removedset = set(added), set(removed)
    # Fix up added, since merged-in additions appear as
    # modifications during merges
    for f in modified:
        if f not in ctx1:
            addedset.add(f)

    for f in sorted(modified + added + removed):
        copyop = None
        f1, f2 = f, f
        if f in addedset:
            f1 = None
            if f in copy:
                if opts.git:
                    f1 = copy[f]
                    if f1 in removedset and f1 not in gone:
                        copyop = 'rename'
                        gone.add(f1)
                    else:
                        copyop = 'copy'
        elif f in removedset:
            f2 = None
            if opts.git:
                # have we already reported a copy above?
                if (f in copyto and copyto[f] in addedset
                    and copy[copyto[f]] == f):
                    continue
        yield f1, f2, copyop

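A minimal sketch of the pairing logic using stand-in objects (real callers pass a changectx for ctx1 and a diffopts instance for opts; the names below are made up for illustration). With git-style diffs enabled, a file that was removed while a copy of it was added is reported as a single rename pair instead of a delete plus an add.

from mercurial.patch import _filepairs

class fakeopts(object):            # stand-in for a diffopts object
    git = True

modified, added, removed = ['b.txt'], ['c.txt'], ['a.txt']
copy = {'c.txt': 'a.txt'}          # c.txt originates from a.txt
ctx1 = set(['a.txt', 'b.txt'])     # stand-in for the parent changectx

for f1, f2, copyop in _filepairs(ctx1, modified, added, removed,
                                 copy, fakeopts()):
    print f1, f2, copyop
# prints:
#   b.txt b.txt None
#   a.txt c.txt rename
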
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        if not text:
            text = ""
        l = len(text)
        s = util.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + copy.keys() + copy.values():
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(
            ctx1, modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        flag1 = None
        flag2 = None
        if f1:
            content1 = getfilectx(f1, ctx1).data()
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            content2 = getfilectx(f2, ctx2).data()
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        binary = False
        if opts.git or losedatafn:
            binary = util.binary(content1) or util.binary(content2)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and not content2) or
                # empty file deletion
                (not content1 and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else:  # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
        else:
            text = mdiff.unidiff(content1, date1,
                                 content2, date2,
                                 path1, path2, opts=opts)
        if header and (text or len(header) > 1):
            yield '\n'.join(header) + '\n'
        if text:
            yield text

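The nested gitindex() helper reproduces git's blob hashing: the content is prefixed with 'blob <size>\0' before being run through SHA-1 (util.sha1 is essentially hashlib.sha1). A standalone equivalent, shown here only to make that contract explicit:

import hashlib

def git_blob_index(text):
    # same scheme as gitindex() above: hash "blob <size>\0" + content,
    # which is how git names blob objects
    if not text:
        text = ""
    s = hashlib.sha1('blob %d\0' % len(text))
    s.update(text)
    return s.hexdigest()

# the hash of the empty blob is a well-known git constant
assert git_blob_index('') == 'e69de29bb2d1d6434b8b29ae775ad8c2e48c5391'
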
def diffstatsum(stats):
    maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
    for f, a, r, b in stats:
        maxfile = max(maxfile, encoding.colwidth(f))
        maxtotal = max(maxtotal, a + r)
        addtotal += a
        removetotal += r
        binary = binary or b

    return maxfile, maxtotal, addtotal, removetotal, binary

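diffstatsum() folds per-file statistics into the totals that diffstat() needs for layout. A small illustrative call (the file names and numbers are made up):

from mercurial.patch import diffstatsum

stats = [('a.txt', 10, 2, False),   # (filename, adds, removes, isbinary)
         ('b.txt', 0, 5, False),
         ('c.bin', 0, 0, True)]
print diffstatsum(stats)
# -> (5, 12, 10, 7, True): widest filename is 5 columns, the busiest file
#    has 12 changed lines, 10 additions and 7 removals overall, and at
#    least one entry is binary
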
def diffstatdata(lines):
    diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        if filename:
            results.append((filename, adds, removes, isbinary))

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # set numbers to 0 anyway when starting new file
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('+') and not line.startswith('+++ '):
            adds += 1
        elif line.startswith('-') and not line.startswith('--- '):
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results

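diffstatdata() only looks at line prefixes, so it can be exercised with a handful of literal diff lines; the sample below is illustrative. Note that the '---'/'+++' file headers are deliberately not counted as removals or additions.

from mercurial.patch import diffstatdata

lines = ['diff -r 1234567890ab -r ba0987654321 foo.py',
         '--- a/foo.py',
         '+++ b/foo.py',
         '@@ -1,2 +1,3 @@',
         '-removed line',
         '+added line',
         '+another added line']
print diffstatdata(lines)
# -> [('foo.py', 2, 1, False)]
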
def diffstat(lines, width=80, git=False):
    output = []
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    if hasbinary and countwidth < 3:
        countwidth = 3
    graphwidth = width - countwidth - maxname - 6
    if graphwidth < 10:
        graphwidth = 10

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    for filename, adds, removes, isbinary in stats:
        if isbinary:
            count = 'Bin'
        else:
            count = adds + removes
        pluses = '+' * scale(adds)
        minuses = '-' * scale(removes)
        output.append(' %s%s | %*s %s%s\n' %
                      (filename, ' ' * (maxname - encoding.colwidth(filename)),
                       countwidth, count, pluses, minuses))

    if stats:
        output.append(_(' %d files changed, %d insertions(+), '
                        '%d deletions(-)\n')
                      % (len(stats), totaladds, totalremoves))

    return ''.join(output)

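Feeding the same kind of line list to diffstat() yields the familiar histogram; a sketch (sample data made up, exact spacing depends on the computed widths):

from mercurial.patch import diffstat

lines = ['diff -r 1234567890ab -r ba0987654321 foo.py',
         '-removed line', '+added line', '+another added line']
print diffstat(lines, width=40),
# prints roughly:
#  foo.py | 3 ++-
#  1 files changed, 2 insertions(+), 1 deletions(-)
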
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for line in diffstat(*args, **kw).splitlines():
        if line and line[-1] in '+-':
            name, graph = line.rsplit(' ', 1)
            yield (name + ' ', '')
            m = re.search(r'\++', graph)
            if m:
                yield (m.group(0), 'diffstat.inserted')
            m = re.search(r'-+', graph)
            if m:
                yield (m.group(0), 'diffstat.deleted')
        else:
            yield (line, '')
        yield ('\n', '')
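
A sketch of how the labelled variant is typically consumed: pair each (output, label) tuple with ui.write() so the color extension can style it. The ui construction and sample lines below are illustrative assumptions, not code from this changeset.

from mercurial import ui as uimod
from mercurial.patch import diffstatui

myui = uimod.ui()                 # assumed way to obtain a ui object here
difflines = ['diff -r 1234567890ab -r ba0987654321 foo.py',
             '-removed line', '+added line']
for output, label in diffstatui(difflines, width=myui.termwidth()):
    myui.write(output, label=label)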