record: change interface of the filtering function...
Laurent Charignon
r24341:616c01b6 default
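
This changeset narrows the contract between dorecord() and its filtering callback: instead of handing the callback a file object that it must parse itself, the caller now runs patch.parsepatch() once on the generated diff and passes the resulting hunks directly, so the callback becomes recordfilter(ui, originalhunks). The stand-alone sketch below illustrates that before/after calling convention; it is a minimal illustration only, and the parsepatch()/filterpatch() bodies here are toy stand-ins rather than Mercurial's real implementations.

    # Illustrative sketch only: toy stand-ins for patch.parsepatch /
    # patch.filterpatch, showing how the filtering callback's contract
    # moves from "takes a file object" to "takes already-parsed hunks".

    import io


    def parsepatch(fp):
        """Toy stand-in: reduce a patch stream to a list of 'hunk' lines."""
        return [line.rstrip("\n") for line in fp
                if line.startswith(("+", "-", "@"))]


    def filterpatch(ui, hunks):
        """Toy stand-in: keep only the hunks the (fake) ui approves."""
        return [h for h in hunks if ui.get(h, True)]


    # Old interface (before this commit): the filter function received a
    # file object and had to parse it itself.
    def recordfilter_old(ui, fp):
        return filterpatch(ui, parsepatch(fp))


    # New interface (after this commit): the caller parses the diff once
    # and passes the resulting hunks straight to the filter function.
    def recordfilter_new(ui, originalhunks):
        return filterpatch(ui, originalhunks)


    if __name__ == "__main__":
        diff = io.StringIO("@@ -1 +1 @@\n-old line\n+new line\n")
        ui = {}  # fake "ui" object: approves everything

        # Caller-side change mirrored from dorecord(): parse first,
        # then hand the hunks to the filter.
        originalhunks = parsepatch(diff)
        print(recordfilter_new(ui, originalhunks))
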
@@ -1,3184 +1,3180 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, errno, re, tempfile, cStringIO, shutil
10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 import match as matchmod
12 import match as matchmod
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 import changelog
14 import changelog
15 import bookmarks
15 import bookmarks
16 import encoding
16 import encoding
17 import crecord as crecordmod
17 import crecord as crecordmod
18 import lock as lockmod
18 import lock as lockmod
19
19
20 def parsealiases(cmd):
20 def parsealiases(cmd):
21 return cmd.lstrip("^").split("|")
21 return cmd.lstrip("^").split("|")
22
22
23 def recordfilter(ui, fp):
23 def recordfilter(ui, originalhunks):
24 return patch.filterpatch(ui, patch.parsepatch(fp))
24 return patch.filterpatch(ui, originalhunks)
25
25
26 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
26 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
27 filterfn, *pats, **opts):
27 filterfn, *pats, **opts):
28 import merge as mergemod
28 import merge as mergemod
29 if not ui.interactive():
29 if not ui.interactive():
30 raise util.Abort(_('running non-interactively, use %s instead') %
30 raise util.Abort(_('running non-interactively, use %s instead') %
31 cmdsuggest)
31 cmdsuggest)
32
32
33 # make sure username is set before going interactive
33 # make sure username is set before going interactive
34 if not opts.get('user'):
34 if not opts.get('user'):
35 ui.username() # raise exception, username not provided
35 ui.username() # raise exception, username not provided
36
36
37 def recordfunc(ui, repo, message, match, opts):
37 def recordfunc(ui, repo, message, match, opts):
38 """This is generic record driver.
38 """This is generic record driver.
39
39
40 Its job is to interactively filter local changes, and
40 Its job is to interactively filter local changes, and
41 accordingly prepare working directory into a state in which the
41 accordingly prepare working directory into a state in which the
42 job can be delegated to a non-interactive commit command such as
42 job can be delegated to a non-interactive commit command such as
43 'commit' or 'qrefresh'.
43 'commit' or 'qrefresh'.
44
44
45 After the actual job is done by non-interactive command, the
45 After the actual job is done by non-interactive command, the
46 working directory is restored to its original state.
46 working directory is restored to its original state.
47
47
48 In the end we'll record interesting changes, and everything else
48 In the end we'll record interesting changes, and everything else
49 will be left in place, so the user can continue working.
49 will be left in place, so the user can continue working.
50 """
50 """
51
51
52 checkunfinished(repo, commit=True)
52 checkunfinished(repo, commit=True)
53 merge = len(repo[None].parents()) > 1
53 merge = len(repo[None].parents()) > 1
54 if merge:
54 if merge:
55 raise util.Abort(_('cannot partially commit a merge '
55 raise util.Abort(_('cannot partially commit a merge '
56 '(use "hg commit" instead)'))
56 '(use "hg commit" instead)'))
57
57
58 status = repo.status(match=match)
58 status = repo.status(match=match)
59 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
59 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
60 diffopts.nodates = True
60 diffopts.nodates = True
61 diffopts.git = True
61 diffopts.git = True
62 originalchunks = patch.diff(repo, changes=status, opts=diffopts)
62 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
63 fp = cStringIO.StringIO()
63 originalchunks = patch.parsepatch(originaldiff)
64 fp.write(''.join(originalchunks))
65 fp.seek(0)
66
64
67 # 1. filter patch, so we have intending-to apply subset of it
65 # 1. filter patch, so we have intending-to apply subset of it
68 try:
66 try:
69 chunks = filterfn(ui, fp)
67 chunks = filterfn(ui, originalchunks)
70 except patch.PatchError, err:
68 except patch.PatchError, err:
71 raise util.Abort(_('error parsing patch: %s') % err)
69 raise util.Abort(_('error parsing patch: %s') % err)
72
70
73 del fp
74
75 contenders = set()
71 contenders = set()
76 for h in chunks:
72 for h in chunks:
77 try:
73 try:
78 contenders.update(set(h.files()))
74 contenders.update(set(h.files()))
79 except AttributeError:
75 except AttributeError:
80 pass
76 pass
81
77
82 changed = status.modified + status.added + status.removed
78 changed = status.modified + status.added + status.removed
83 newfiles = [f for f in changed if f in contenders]
79 newfiles = [f for f in changed if f in contenders]
84 if not newfiles:
80 if not newfiles:
85 ui.status(_('no changes to record\n'))
81 ui.status(_('no changes to record\n'))
86 return 0
82 return 0
87
83
88 newandmodifiedfiles = set()
84 newandmodifiedfiles = set()
89 for h in chunks:
85 for h in chunks:
90 iscrecordhunk = isinstance(h, crecordmod.uihunk)
86 iscrecordhunk = isinstance(h, crecordmod.uihunk)
91 ishunk = isinstance(h, patch.recordhunk)
87 ishunk = isinstance(h, patch.recordhunk)
92 isnew = h.filename() in status.added
88 isnew = h.filename() in status.added
93 if (ishunk or iscrecordhunk) and isnew and not h in originalchunks:
89 if (ishunk or iscrecordhunk) and isnew and not h in originalchunks:
94 newandmodifiedfiles.add(h.filename())
90 newandmodifiedfiles.add(h.filename())
95
91
96 modified = set(status.modified)
92 modified = set(status.modified)
97
93
98 # 2. backup changed files, so we can restore them in the end
94 # 2. backup changed files, so we can restore them in the end
99
95
100 if backupall:
96 if backupall:
101 tobackup = changed
97 tobackup = changed
102 else:
98 else:
103 tobackup = [f for f in newfiles
99 tobackup = [f for f in newfiles
104 if f in modified or f in newandmodifiedfiles]
100 if f in modified or f in newandmodifiedfiles]
105
101
106 backups = {}
102 backups = {}
107 if tobackup:
103 if tobackup:
108 backupdir = repo.join('record-backups')
104 backupdir = repo.join('record-backups')
109 try:
105 try:
110 os.mkdir(backupdir)
106 os.mkdir(backupdir)
111 except OSError, err:
107 except OSError, err:
112 if err.errno != errno.EEXIST:
108 if err.errno != errno.EEXIST:
113 raise
109 raise
114 try:
110 try:
115 # backup continues
111 # backup continues
116 for f in tobackup:
112 for f in tobackup:
117 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
113 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
118 dir=backupdir)
114 dir=backupdir)
119 os.close(fd)
115 os.close(fd)
120 ui.debug('backup %r as %r\n' % (f, tmpname))
116 ui.debug('backup %r as %r\n' % (f, tmpname))
121 util.copyfile(repo.wjoin(f), tmpname)
117 util.copyfile(repo.wjoin(f), tmpname)
122 shutil.copystat(repo.wjoin(f), tmpname)
118 shutil.copystat(repo.wjoin(f), tmpname)
123 backups[f] = tmpname
119 backups[f] = tmpname
124
120
125 fp = cStringIO.StringIO()
121 fp = cStringIO.StringIO()
126 for c in chunks:
122 for c in chunks:
127 fname = c.filename()
123 fname = c.filename()
128 if fname in backups or fname in newandmodifiedfiles:
124 if fname in backups or fname in newandmodifiedfiles:
129 c.write(fp)
125 c.write(fp)
130 dopatch = fp.tell()
126 dopatch = fp.tell()
131 fp.seek(0)
127 fp.seek(0)
132
128
133 [os.unlink(c) for c in newandmodifiedfiles]
129 [os.unlink(c) for c in newandmodifiedfiles]
134
130
135 # 3a. apply filtered patch to clean repo (clean)
131 # 3a. apply filtered patch to clean repo (clean)
136 if backups:
132 if backups:
137 # Equivalent to hg.revert
133 # Equivalent to hg.revert
138 choices = lambda key: key in backups
134 choices = lambda key: key in backups
139 mergemod.update(repo, repo.dirstate.p1(),
135 mergemod.update(repo, repo.dirstate.p1(),
140 False, True, choices)
136 False, True, choices)
141
137
142
138
143 # 3b. (apply)
139 # 3b. (apply)
144 if dopatch:
140 if dopatch:
145 try:
141 try:
146 ui.debug('applying patch\n')
142 ui.debug('applying patch\n')
147 ui.debug(fp.getvalue())
143 ui.debug(fp.getvalue())
148 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
144 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
149 except patch.PatchError, err:
145 except patch.PatchError, err:
150 raise util.Abort(str(err))
146 raise util.Abort(str(err))
151 del fp
147 del fp
152
148
153 # 4. We prepared working directory according to filtered
149 # 4. We prepared working directory according to filtered
154 # patch. Now is the time to delegate the job to
150 # patch. Now is the time to delegate the job to
155 # commit/qrefresh or the like!
151 # commit/qrefresh or the like!
156
152
157 # Make all of the pathnames absolute.
153 # Make all of the pathnames absolute.
158 newfiles = [repo.wjoin(nf) for nf in newfiles]
154 newfiles = [repo.wjoin(nf) for nf in newfiles]
159 commitfunc(ui, repo, *newfiles, **opts)
155 commitfunc(ui, repo, *newfiles, **opts)
160
156
161 return 0
157 return 0
162 finally:
158 finally:
163 # 5. finally restore backed-up files
159 # 5. finally restore backed-up files
164 try:
160 try:
165 for realname, tmpname in backups.iteritems():
161 for realname, tmpname in backups.iteritems():
166 ui.debug('restoring %r to %r\n' % (tmpname, realname))
162 ui.debug('restoring %r to %r\n' % (tmpname, realname))
167 util.copyfile(tmpname, repo.wjoin(realname))
163 util.copyfile(tmpname, repo.wjoin(realname))
168 # Our calls to copystat() here and above are a
164 # Our calls to copystat() here and above are a
169 # hack to trick any editors that have f open that
165 # hack to trick any editors that have f open that
170 # we haven't modified them.
166 # we haven't modified them.
171 #
167 #
172 # Also note that this racy as an editor could
168 # Also note that this racy as an editor could
173 # notice the file's mtime before we've finished
169 # notice the file's mtime before we've finished
174 # writing it.
170 # writing it.
175 shutil.copystat(tmpname, repo.wjoin(realname))
171 shutil.copystat(tmpname, repo.wjoin(realname))
176 os.unlink(tmpname)
172 os.unlink(tmpname)
177 if tobackup:
173 if tobackup:
178 os.rmdir(backupdir)
174 os.rmdir(backupdir)
179 except OSError:
175 except OSError:
180 pass
176 pass
181
177
182 # wrap ui.write so diff output can be labeled/colorized
178 # wrap ui.write so diff output can be labeled/colorized
183 def wrapwrite(orig, *args, **kw):
179 def wrapwrite(orig, *args, **kw):
184 label = kw.pop('label', '')
180 label = kw.pop('label', '')
185 for chunk, l in patch.difflabel(lambda: args):
181 for chunk, l in patch.difflabel(lambda: args):
186 orig(chunk, label=label + l)
182 orig(chunk, label=label + l)
187
183
188 oldwrite = ui.write
184 oldwrite = ui.write
189 def wrap(*args, **kwargs):
185 def wrap(*args, **kwargs):
190 return wrapwrite(oldwrite, *args, **kwargs)
186 return wrapwrite(oldwrite, *args, **kwargs)
191 setattr(ui, 'write', wrap)
187 setattr(ui, 'write', wrap)
192
188
193 try:
189 try:
194 return commit(ui, repo, recordfunc, pats, opts)
190 return commit(ui, repo, recordfunc, pats, opts)
195 finally:
191 finally:
196 ui.write = oldwrite
192 ui.write = oldwrite
197
193
198
194
199 def findpossible(cmd, table, strict=False):
195 def findpossible(cmd, table, strict=False):
200 """
196 """
201 Return cmd -> (aliases, command table entry)
197 Return cmd -> (aliases, command table entry)
202 for each matching command.
198 for each matching command.
203 Return debug commands (or their aliases) only if no normal command matches.
199 Return debug commands (or their aliases) only if no normal command matches.
204 """
200 """
205 choice = {}
201 choice = {}
206 debugchoice = {}
202 debugchoice = {}
207
203
208 if cmd in table:
204 if cmd in table:
209 # short-circuit exact matches, "log" alias beats "^log|history"
205 # short-circuit exact matches, "log" alias beats "^log|history"
210 keys = [cmd]
206 keys = [cmd]
211 else:
207 else:
212 keys = table.keys()
208 keys = table.keys()
213
209
214 allcmds = []
210 allcmds = []
215 for e in keys:
211 for e in keys:
216 aliases = parsealiases(e)
212 aliases = parsealiases(e)
217 allcmds.extend(aliases)
213 allcmds.extend(aliases)
218 found = None
214 found = None
219 if cmd in aliases:
215 if cmd in aliases:
220 found = cmd
216 found = cmd
221 elif not strict:
217 elif not strict:
222 for a in aliases:
218 for a in aliases:
223 if a.startswith(cmd):
219 if a.startswith(cmd):
224 found = a
220 found = a
225 break
221 break
226 if found is not None:
222 if found is not None:
227 if aliases[0].startswith("debug") or found.startswith("debug"):
223 if aliases[0].startswith("debug") or found.startswith("debug"):
228 debugchoice[found] = (aliases, table[e])
224 debugchoice[found] = (aliases, table[e])
229 else:
225 else:
230 choice[found] = (aliases, table[e])
226 choice[found] = (aliases, table[e])
231
227
232 if not choice and debugchoice:
228 if not choice and debugchoice:
233 choice = debugchoice
229 choice = debugchoice
234
230
235 return choice, allcmds
231 return choice, allcmds
236
232
237 def findcmd(cmd, table, strict=True):
233 def findcmd(cmd, table, strict=True):
238 """Return (aliases, command table entry) for command string."""
234 """Return (aliases, command table entry) for command string."""
239 choice, allcmds = findpossible(cmd, table, strict)
235 choice, allcmds = findpossible(cmd, table, strict)
240
236
241 if cmd in choice:
237 if cmd in choice:
242 return choice[cmd]
238 return choice[cmd]
243
239
244 if len(choice) > 1:
240 if len(choice) > 1:
245 clist = choice.keys()
241 clist = choice.keys()
246 clist.sort()
242 clist.sort()
247 raise error.AmbiguousCommand(cmd, clist)
243 raise error.AmbiguousCommand(cmd, clist)
248
244
249 if choice:
245 if choice:
250 return choice.values()[0]
246 return choice.values()[0]
251
247
252 raise error.UnknownCommand(cmd, allcmds)
248 raise error.UnknownCommand(cmd, allcmds)
253
249
254 def findrepo(p):
250 def findrepo(p):
255 while not os.path.isdir(os.path.join(p, ".hg")):
251 while not os.path.isdir(os.path.join(p, ".hg")):
256 oldp, p = p, os.path.dirname(p)
252 oldp, p = p, os.path.dirname(p)
257 if p == oldp:
253 if p == oldp:
258 return None
254 return None
259
255
260 return p
256 return p
261
257
262 def bailifchanged(repo):
258 def bailifchanged(repo):
263 if repo.dirstate.p2() != nullid:
259 if repo.dirstate.p2() != nullid:
264 raise util.Abort(_('outstanding uncommitted merge'))
260 raise util.Abort(_('outstanding uncommitted merge'))
265 modified, added, removed, deleted = repo.status()[:4]
261 modified, added, removed, deleted = repo.status()[:4]
266 if modified or added or removed or deleted:
262 if modified or added or removed or deleted:
267 raise util.Abort(_('uncommitted changes'))
263 raise util.Abort(_('uncommitted changes'))
268 ctx = repo[None]
264 ctx = repo[None]
269 for s in sorted(ctx.substate):
265 for s in sorted(ctx.substate):
270 if ctx.sub(s).dirty():
266 if ctx.sub(s).dirty():
271 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
267 raise util.Abort(_("uncommitted changes in subrepo %s") % s)
272
268
273 def logmessage(ui, opts):
269 def logmessage(ui, opts):
274 """ get the log message according to -m and -l option """
270 """ get the log message according to -m and -l option """
275 message = opts.get('message')
271 message = opts.get('message')
276 logfile = opts.get('logfile')
272 logfile = opts.get('logfile')
277
273
278 if message and logfile:
274 if message and logfile:
279 raise util.Abort(_('options --message and --logfile are mutually '
275 raise util.Abort(_('options --message and --logfile are mutually '
280 'exclusive'))
276 'exclusive'))
281 if not message and logfile:
277 if not message and logfile:
282 try:
278 try:
283 if logfile == '-':
279 if logfile == '-':
284 message = ui.fin.read()
280 message = ui.fin.read()
285 else:
281 else:
286 message = '\n'.join(util.readfile(logfile).splitlines())
282 message = '\n'.join(util.readfile(logfile).splitlines())
287 except IOError, inst:
283 except IOError, inst:
288 raise util.Abort(_("can't read commit message '%s': %s") %
284 raise util.Abort(_("can't read commit message '%s': %s") %
289 (logfile, inst.strerror))
285 (logfile, inst.strerror))
290 return message
286 return message
291
287
292 def mergeeditform(ctxorbool, baseformname):
288 def mergeeditform(ctxorbool, baseformname):
293 """return appropriate editform name (referencing a committemplate)
289 """return appropriate editform name (referencing a committemplate)
294
290
295 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
291 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
296 merging is committed.
292 merging is committed.
297
293
298 This returns baseformname with '.merge' appended if it is a merge,
294 This returns baseformname with '.merge' appended if it is a merge,
299 otherwise '.normal' is appended.
295 otherwise '.normal' is appended.
300 """
296 """
301 if isinstance(ctxorbool, bool):
297 if isinstance(ctxorbool, bool):
302 if ctxorbool:
298 if ctxorbool:
303 return baseformname + ".merge"
299 return baseformname + ".merge"
304 elif 1 < len(ctxorbool.parents()):
300 elif 1 < len(ctxorbool.parents()):
305 return baseformname + ".merge"
301 return baseformname + ".merge"
306
302
307 return baseformname + ".normal"
303 return baseformname + ".normal"
308
304
309 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
305 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
310 editform='', **opts):
306 editform='', **opts):
311 """get appropriate commit message editor according to '--edit' option
307 """get appropriate commit message editor according to '--edit' option
312
308
313 'finishdesc' is a function to be called with edited commit message
309 'finishdesc' is a function to be called with edited commit message
314 (= 'description' of the new changeset) just after editing, but
310 (= 'description' of the new changeset) just after editing, but
315 before checking empty-ness. It should return actual text to be
311 before checking empty-ness. It should return actual text to be
316 stored into history. This allows to change description before
312 stored into history. This allows to change description before
317 storing.
313 storing.
318
314
319 'extramsg' is a extra message to be shown in the editor instead of
315 'extramsg' is a extra message to be shown in the editor instead of
320 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
316 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
321 is automatically added.
317 is automatically added.
322
318
323 'editform' is a dot-separated list of names, to distinguish
319 'editform' is a dot-separated list of names, to distinguish
324 the purpose of commit text editing.
320 the purpose of commit text editing.
325
321
326 'getcommiteditor' returns 'commitforceeditor' regardless of
322 'getcommiteditor' returns 'commitforceeditor' regardless of
327 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
323 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
328 they are specific for usage in MQ.
324 they are specific for usage in MQ.
329 """
325 """
330 if edit or finishdesc or extramsg:
326 if edit or finishdesc or extramsg:
331 return lambda r, c, s: commitforceeditor(r, c, s,
327 return lambda r, c, s: commitforceeditor(r, c, s,
332 finishdesc=finishdesc,
328 finishdesc=finishdesc,
333 extramsg=extramsg,
329 extramsg=extramsg,
334 editform=editform)
330 editform=editform)
335 elif editform:
331 elif editform:
336 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
332 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
337 else:
333 else:
338 return commiteditor
334 return commiteditor
339
335
340 def loglimit(opts):
336 def loglimit(opts):
341 """get the log limit according to option -l/--limit"""
337 """get the log limit according to option -l/--limit"""
342 limit = opts.get('limit')
338 limit = opts.get('limit')
343 if limit:
339 if limit:
344 try:
340 try:
345 limit = int(limit)
341 limit = int(limit)
346 except ValueError:
342 except ValueError:
347 raise util.Abort(_('limit must be a positive integer'))
343 raise util.Abort(_('limit must be a positive integer'))
348 if limit <= 0:
344 if limit <= 0:
349 raise util.Abort(_('limit must be positive'))
345 raise util.Abort(_('limit must be positive'))
350 else:
346 else:
351 limit = None
347 limit = None
352 return limit
348 return limit
353
349
354 def makefilename(repo, pat, node, desc=None,
350 def makefilename(repo, pat, node, desc=None,
355 total=None, seqno=None, revwidth=None, pathname=None):
351 total=None, seqno=None, revwidth=None, pathname=None):
356 node_expander = {
352 node_expander = {
357 'H': lambda: hex(node),
353 'H': lambda: hex(node),
358 'R': lambda: str(repo.changelog.rev(node)),
354 'R': lambda: str(repo.changelog.rev(node)),
359 'h': lambda: short(node),
355 'h': lambda: short(node),
360 'm': lambda: re.sub('[^\w]', '_', str(desc))
356 'm': lambda: re.sub('[^\w]', '_', str(desc))
361 }
357 }
362 expander = {
358 expander = {
363 '%': lambda: '%',
359 '%': lambda: '%',
364 'b': lambda: os.path.basename(repo.root),
360 'b': lambda: os.path.basename(repo.root),
365 }
361 }
366
362
367 try:
363 try:
368 if node:
364 if node:
369 expander.update(node_expander)
365 expander.update(node_expander)
370 if node:
366 if node:
371 expander['r'] = (lambda:
367 expander['r'] = (lambda:
372 str(repo.changelog.rev(node)).zfill(revwidth or 0))
368 str(repo.changelog.rev(node)).zfill(revwidth or 0))
373 if total is not None:
369 if total is not None:
374 expander['N'] = lambda: str(total)
370 expander['N'] = lambda: str(total)
375 if seqno is not None:
371 if seqno is not None:
376 expander['n'] = lambda: str(seqno)
372 expander['n'] = lambda: str(seqno)
377 if total is not None and seqno is not None:
373 if total is not None and seqno is not None:
378 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
374 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
379 if pathname is not None:
375 if pathname is not None:
380 expander['s'] = lambda: os.path.basename(pathname)
376 expander['s'] = lambda: os.path.basename(pathname)
381 expander['d'] = lambda: os.path.dirname(pathname) or '.'
377 expander['d'] = lambda: os.path.dirname(pathname) or '.'
382 expander['p'] = lambda: pathname
378 expander['p'] = lambda: pathname
383
379
384 newname = []
380 newname = []
385 patlen = len(pat)
381 patlen = len(pat)
386 i = 0
382 i = 0
387 while i < patlen:
383 while i < patlen:
388 c = pat[i]
384 c = pat[i]
389 if c == '%':
385 if c == '%':
390 i += 1
386 i += 1
391 c = pat[i]
387 c = pat[i]
392 c = expander[c]()
388 c = expander[c]()
393 newname.append(c)
389 newname.append(c)
394 i += 1
390 i += 1
395 return ''.join(newname)
391 return ''.join(newname)
396 except KeyError, inst:
392 except KeyError, inst:
397 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
393 raise util.Abort(_("invalid format spec '%%%s' in output filename") %
398 inst.args[0])
394 inst.args[0])
399
395
400 def makefileobj(repo, pat, node=None, desc=None, total=None,
396 def makefileobj(repo, pat, node=None, desc=None, total=None,
401 seqno=None, revwidth=None, mode='wb', modemap=None,
397 seqno=None, revwidth=None, mode='wb', modemap=None,
402 pathname=None):
398 pathname=None):
403
399
404 writable = mode not in ('r', 'rb')
400 writable = mode not in ('r', 'rb')
405
401
406 if not pat or pat == '-':
402 if not pat or pat == '-':
407 if writable:
403 if writable:
408 fp = repo.ui.fout
404 fp = repo.ui.fout
409 else:
405 else:
410 fp = repo.ui.fin
406 fp = repo.ui.fin
411 if util.safehasattr(fp, 'fileno'):
407 if util.safehasattr(fp, 'fileno'):
412 return os.fdopen(os.dup(fp.fileno()), mode)
408 return os.fdopen(os.dup(fp.fileno()), mode)
413 else:
409 else:
414 # if this fp can't be duped properly, return
410 # if this fp can't be duped properly, return
415 # a dummy object that can be closed
411 # a dummy object that can be closed
416 class wrappedfileobj(object):
412 class wrappedfileobj(object):
417 noop = lambda x: None
413 noop = lambda x: None
418 def __init__(self, f):
414 def __init__(self, f):
419 self.f = f
415 self.f = f
420 def __getattr__(self, attr):
416 def __getattr__(self, attr):
421 if attr == 'close':
417 if attr == 'close':
422 return self.noop
418 return self.noop
423 else:
419 else:
424 return getattr(self.f, attr)
420 return getattr(self.f, attr)
425
421
426 return wrappedfileobj(fp)
422 return wrappedfileobj(fp)
427 if util.safehasattr(pat, 'write') and writable:
423 if util.safehasattr(pat, 'write') and writable:
428 return pat
424 return pat
429 if util.safehasattr(pat, 'read') and 'r' in mode:
425 if util.safehasattr(pat, 'read') and 'r' in mode:
430 return pat
426 return pat
431 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
427 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
432 if modemap is not None:
428 if modemap is not None:
433 mode = modemap.get(fn, mode)
429 mode = modemap.get(fn, mode)
434 if mode == 'wb':
430 if mode == 'wb':
435 modemap[fn] = 'ab'
431 modemap[fn] = 'ab'
436 return open(fn, mode)
432 return open(fn, mode)
437
433
438 def openrevlog(repo, cmd, file_, opts):
434 def openrevlog(repo, cmd, file_, opts):
439 """opens the changelog, manifest, a filelog or a given revlog"""
435 """opens the changelog, manifest, a filelog or a given revlog"""
440 cl = opts['changelog']
436 cl = opts['changelog']
441 mf = opts['manifest']
437 mf = opts['manifest']
442 msg = None
438 msg = None
443 if cl and mf:
439 if cl and mf:
444 msg = _('cannot specify --changelog and --manifest at the same time')
440 msg = _('cannot specify --changelog and --manifest at the same time')
445 elif cl or mf:
441 elif cl or mf:
446 if file_:
442 if file_:
447 msg = _('cannot specify filename with --changelog or --manifest')
443 msg = _('cannot specify filename with --changelog or --manifest')
448 elif not repo:
444 elif not repo:
449 msg = _('cannot specify --changelog or --manifest '
445 msg = _('cannot specify --changelog or --manifest '
450 'without a repository')
446 'without a repository')
451 if msg:
447 if msg:
452 raise util.Abort(msg)
448 raise util.Abort(msg)
453
449
454 r = None
450 r = None
455 if repo:
451 if repo:
456 if cl:
452 if cl:
457 r = repo.unfiltered().changelog
453 r = repo.unfiltered().changelog
458 elif mf:
454 elif mf:
459 r = repo.manifest
455 r = repo.manifest
460 elif file_:
456 elif file_:
461 filelog = repo.file(file_)
457 filelog = repo.file(file_)
462 if len(filelog):
458 if len(filelog):
463 r = filelog
459 r = filelog
464 if not r:
460 if not r:
465 if not file_:
461 if not file_:
466 raise error.CommandError(cmd, _('invalid arguments'))
462 raise error.CommandError(cmd, _('invalid arguments'))
467 if not os.path.isfile(file_):
463 if not os.path.isfile(file_):
468 raise util.Abort(_("revlog '%s' not found") % file_)
464 raise util.Abort(_("revlog '%s' not found") % file_)
469 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
465 r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
470 file_[:-2] + ".i")
466 file_[:-2] + ".i")
471 return r
467 return r
472
468
473 def copy(ui, repo, pats, opts, rename=False):
469 def copy(ui, repo, pats, opts, rename=False):
474 # called with the repo lock held
470 # called with the repo lock held
475 #
471 #
476 # hgsep => pathname that uses "/" to separate directories
472 # hgsep => pathname that uses "/" to separate directories
477 # ossep => pathname that uses os.sep to separate directories
473 # ossep => pathname that uses os.sep to separate directories
478 cwd = repo.getcwd()
474 cwd = repo.getcwd()
479 targets = {}
475 targets = {}
480 after = opts.get("after")
476 after = opts.get("after")
481 dryrun = opts.get("dry_run")
477 dryrun = opts.get("dry_run")
482 wctx = repo[None]
478 wctx = repo[None]
483
479
484 def walkpat(pat):
480 def walkpat(pat):
485 srcs = []
481 srcs = []
486 if after:
482 if after:
487 badstates = '?'
483 badstates = '?'
488 else:
484 else:
489 badstates = '?r'
485 badstates = '?r'
490 m = scmutil.match(repo[None], [pat], opts, globbed=True)
486 m = scmutil.match(repo[None], [pat], opts, globbed=True)
491 for abs in repo.walk(m):
487 for abs in repo.walk(m):
492 state = repo.dirstate[abs]
488 state = repo.dirstate[abs]
493 rel = m.rel(abs)
489 rel = m.rel(abs)
494 exact = m.exact(abs)
490 exact = m.exact(abs)
495 if state in badstates:
491 if state in badstates:
496 if exact and state == '?':
492 if exact and state == '?':
497 ui.warn(_('%s: not copying - file is not managed\n') % rel)
493 ui.warn(_('%s: not copying - file is not managed\n') % rel)
498 if exact and state == 'r':
494 if exact and state == 'r':
499 ui.warn(_('%s: not copying - file has been marked for'
495 ui.warn(_('%s: not copying - file has been marked for'
500 ' remove\n') % rel)
496 ' remove\n') % rel)
501 continue
497 continue
502 # abs: hgsep
498 # abs: hgsep
503 # rel: ossep
499 # rel: ossep
504 srcs.append((abs, rel, exact))
500 srcs.append((abs, rel, exact))
505 return srcs
501 return srcs
506
502
507 # abssrc: hgsep
503 # abssrc: hgsep
508 # relsrc: ossep
504 # relsrc: ossep
509 # otarget: ossep
505 # otarget: ossep
510 def copyfile(abssrc, relsrc, otarget, exact):
506 def copyfile(abssrc, relsrc, otarget, exact):
511 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
507 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
512 if '/' in abstarget:
508 if '/' in abstarget:
513 # We cannot normalize abstarget itself, this would prevent
509 # We cannot normalize abstarget itself, this would prevent
514 # case only renames, like a => A.
510 # case only renames, like a => A.
515 abspath, absname = abstarget.rsplit('/', 1)
511 abspath, absname = abstarget.rsplit('/', 1)
516 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
512 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
517 reltarget = repo.pathto(abstarget, cwd)
513 reltarget = repo.pathto(abstarget, cwd)
518 target = repo.wjoin(abstarget)
514 target = repo.wjoin(abstarget)
519 src = repo.wjoin(abssrc)
515 src = repo.wjoin(abssrc)
520 state = repo.dirstate[abstarget]
516 state = repo.dirstate[abstarget]
521
517
522 scmutil.checkportable(ui, abstarget)
518 scmutil.checkportable(ui, abstarget)
523
519
524 # check for collisions
520 # check for collisions
525 prevsrc = targets.get(abstarget)
521 prevsrc = targets.get(abstarget)
526 if prevsrc is not None:
522 if prevsrc is not None:
527 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
523 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
528 (reltarget, repo.pathto(abssrc, cwd),
524 (reltarget, repo.pathto(abssrc, cwd),
529 repo.pathto(prevsrc, cwd)))
525 repo.pathto(prevsrc, cwd)))
530 return
526 return
531
527
532 # check for overwrites
528 # check for overwrites
533 exists = os.path.lexists(target)
529 exists = os.path.lexists(target)
534 samefile = False
530 samefile = False
535 if exists and abssrc != abstarget:
531 if exists and abssrc != abstarget:
536 if (repo.dirstate.normalize(abssrc) ==
532 if (repo.dirstate.normalize(abssrc) ==
537 repo.dirstate.normalize(abstarget)):
533 repo.dirstate.normalize(abstarget)):
538 if not rename:
534 if not rename:
539 ui.warn(_("%s: can't copy - same file\n") % reltarget)
535 ui.warn(_("%s: can't copy - same file\n") % reltarget)
540 return
536 return
541 exists = False
537 exists = False
542 samefile = True
538 samefile = True
543
539
544 if not after and exists or after and state in 'mn':
540 if not after and exists or after and state in 'mn':
545 if not opts['force']:
541 if not opts['force']:
546 ui.warn(_('%s: not overwriting - file exists\n') %
542 ui.warn(_('%s: not overwriting - file exists\n') %
547 reltarget)
543 reltarget)
548 return
544 return
549
545
550 if after:
546 if after:
551 if not exists:
547 if not exists:
552 if rename:
548 if rename:
553 ui.warn(_('%s: not recording move - %s does not exist\n') %
549 ui.warn(_('%s: not recording move - %s does not exist\n') %
554 (relsrc, reltarget))
550 (relsrc, reltarget))
555 else:
551 else:
556 ui.warn(_('%s: not recording copy - %s does not exist\n') %
552 ui.warn(_('%s: not recording copy - %s does not exist\n') %
557 (relsrc, reltarget))
553 (relsrc, reltarget))
558 return
554 return
559 elif not dryrun:
555 elif not dryrun:
560 try:
556 try:
561 if exists:
557 if exists:
562 os.unlink(target)
558 os.unlink(target)
563 targetdir = os.path.dirname(target) or '.'
559 targetdir = os.path.dirname(target) or '.'
564 if not os.path.isdir(targetdir):
560 if not os.path.isdir(targetdir):
565 os.makedirs(targetdir)
561 os.makedirs(targetdir)
566 if samefile:
562 if samefile:
567 tmp = target + "~hgrename"
563 tmp = target + "~hgrename"
568 os.rename(src, tmp)
564 os.rename(src, tmp)
569 os.rename(tmp, target)
565 os.rename(tmp, target)
570 else:
566 else:
571 util.copyfile(src, target)
567 util.copyfile(src, target)
572 srcexists = True
568 srcexists = True
573 except IOError, inst:
569 except IOError, inst:
574 if inst.errno == errno.ENOENT:
570 if inst.errno == errno.ENOENT:
575 ui.warn(_('%s: deleted in working copy\n') % relsrc)
571 ui.warn(_('%s: deleted in working copy\n') % relsrc)
576 srcexists = False
572 srcexists = False
577 else:
573 else:
578 ui.warn(_('%s: cannot copy - %s\n') %
574 ui.warn(_('%s: cannot copy - %s\n') %
579 (relsrc, inst.strerror))
575 (relsrc, inst.strerror))
580 return True # report a failure
576 return True # report a failure
581
577
582 if ui.verbose or not exact:
578 if ui.verbose or not exact:
583 if rename:
579 if rename:
584 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
580 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
585 else:
581 else:
586 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
582 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
587
583
588 targets[abstarget] = abssrc
584 targets[abstarget] = abssrc
589
585
590 # fix up dirstate
586 # fix up dirstate
591 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
587 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
592 dryrun=dryrun, cwd=cwd)
588 dryrun=dryrun, cwd=cwd)
593 if rename and not dryrun:
589 if rename and not dryrun:
594 if not after and srcexists and not samefile:
590 if not after and srcexists and not samefile:
595 util.unlinkpath(repo.wjoin(abssrc))
591 util.unlinkpath(repo.wjoin(abssrc))
596 wctx.forget([abssrc])
592 wctx.forget([abssrc])
597
593
598 # pat: ossep
594 # pat: ossep
599 # dest ossep
595 # dest ossep
600 # srcs: list of (hgsep, hgsep, ossep, bool)
596 # srcs: list of (hgsep, hgsep, ossep, bool)
601 # return: function that takes hgsep and returns ossep
597 # return: function that takes hgsep and returns ossep
602 def targetpathfn(pat, dest, srcs):
598 def targetpathfn(pat, dest, srcs):
603 if os.path.isdir(pat):
599 if os.path.isdir(pat):
604 abspfx = pathutil.canonpath(repo.root, cwd, pat)
600 abspfx = pathutil.canonpath(repo.root, cwd, pat)
605 abspfx = util.localpath(abspfx)
601 abspfx = util.localpath(abspfx)
606 if destdirexists:
602 if destdirexists:
607 striplen = len(os.path.split(abspfx)[0])
603 striplen = len(os.path.split(abspfx)[0])
608 else:
604 else:
609 striplen = len(abspfx)
605 striplen = len(abspfx)
610 if striplen:
606 if striplen:
611 striplen += len(os.sep)
607 striplen += len(os.sep)
612 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
608 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
613 elif destdirexists:
609 elif destdirexists:
614 res = lambda p: os.path.join(dest,
610 res = lambda p: os.path.join(dest,
615 os.path.basename(util.localpath(p)))
611 os.path.basename(util.localpath(p)))
616 else:
612 else:
617 res = lambda p: dest
613 res = lambda p: dest
618 return res
614 return res
619
615
620 # pat: ossep
616 # pat: ossep
621 # dest ossep
617 # dest ossep
622 # srcs: list of (hgsep, hgsep, ossep, bool)
618 # srcs: list of (hgsep, hgsep, ossep, bool)
623 # return: function that takes hgsep and returns ossep
619 # return: function that takes hgsep and returns ossep
624 def targetpathafterfn(pat, dest, srcs):
620 def targetpathafterfn(pat, dest, srcs):
625 if matchmod.patkind(pat):
621 if matchmod.patkind(pat):
626 # a mercurial pattern
622 # a mercurial pattern
627 res = lambda p: os.path.join(dest,
623 res = lambda p: os.path.join(dest,
628 os.path.basename(util.localpath(p)))
624 os.path.basename(util.localpath(p)))
629 else:
625 else:
630 abspfx = pathutil.canonpath(repo.root, cwd, pat)
626 abspfx = pathutil.canonpath(repo.root, cwd, pat)
631 if len(abspfx) < len(srcs[0][0]):
627 if len(abspfx) < len(srcs[0][0]):
632 # A directory. Either the target path contains the last
628 # A directory. Either the target path contains the last
633 # component of the source path or it does not.
629 # component of the source path or it does not.
634 def evalpath(striplen):
630 def evalpath(striplen):
635 score = 0
631 score = 0
636 for s in srcs:
632 for s in srcs:
637 t = os.path.join(dest, util.localpath(s[0])[striplen:])
633 t = os.path.join(dest, util.localpath(s[0])[striplen:])
638 if os.path.lexists(t):
634 if os.path.lexists(t):
639 score += 1
635 score += 1
640 return score
636 return score
641
637
642 abspfx = util.localpath(abspfx)
638 abspfx = util.localpath(abspfx)
643 striplen = len(abspfx)
639 striplen = len(abspfx)
644 if striplen:
640 if striplen:
645 striplen += len(os.sep)
641 striplen += len(os.sep)
646 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
642 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
647 score = evalpath(striplen)
643 score = evalpath(striplen)
648 striplen1 = len(os.path.split(abspfx)[0])
644 striplen1 = len(os.path.split(abspfx)[0])
649 if striplen1:
645 if striplen1:
650 striplen1 += len(os.sep)
646 striplen1 += len(os.sep)
651 if evalpath(striplen1) > score:
647 if evalpath(striplen1) > score:
652 striplen = striplen1
648 striplen = striplen1
653 res = lambda p: os.path.join(dest,
649 res = lambda p: os.path.join(dest,
654 util.localpath(p)[striplen:])
650 util.localpath(p)[striplen:])
655 else:
651 else:
656 # a file
652 # a file
657 if destdirexists:
653 if destdirexists:
658 res = lambda p: os.path.join(dest,
654 res = lambda p: os.path.join(dest,
659 os.path.basename(util.localpath(p)))
655 os.path.basename(util.localpath(p)))
660 else:
656 else:
661 res = lambda p: dest
657 res = lambda p: dest
662 return res
658 return res
663
659
664
660
665 pats = scmutil.expandpats(pats)
661 pats = scmutil.expandpats(pats)
666 if not pats:
662 if not pats:
667 raise util.Abort(_('no source or destination specified'))
663 raise util.Abort(_('no source or destination specified'))
668 if len(pats) == 1:
664 if len(pats) == 1:
669 raise util.Abort(_('no destination specified'))
665 raise util.Abort(_('no destination specified'))
670 dest = pats.pop()
666 dest = pats.pop()
671 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
667 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
672 if not destdirexists:
668 if not destdirexists:
673 if len(pats) > 1 or matchmod.patkind(pats[0]):
669 if len(pats) > 1 or matchmod.patkind(pats[0]):
674 raise util.Abort(_('with multiple sources, destination must be an '
670 raise util.Abort(_('with multiple sources, destination must be an '
675 'existing directory'))
671 'existing directory'))
676 if util.endswithsep(dest):
672 if util.endswithsep(dest):
677 raise util.Abort(_('destination %s is not a directory') % dest)
673 raise util.Abort(_('destination %s is not a directory') % dest)
678
674
679 tfn = targetpathfn
675 tfn = targetpathfn
680 if after:
676 if after:
681 tfn = targetpathafterfn
677 tfn = targetpathafterfn
682 copylist = []
678 copylist = []
683 for pat in pats:
679 for pat in pats:
684 srcs = walkpat(pat)
680 srcs = walkpat(pat)
685 if not srcs:
681 if not srcs:
686 continue
682 continue
687 copylist.append((tfn(pat, dest, srcs), srcs))
683 copylist.append((tfn(pat, dest, srcs), srcs))
688 if not copylist:
684 if not copylist:
689 raise util.Abort(_('no files to copy'))
685 raise util.Abort(_('no files to copy'))
690
686
691 errors = 0
687 errors = 0
692 for targetpath, srcs in copylist:
688 for targetpath, srcs in copylist:
693 for abssrc, relsrc, exact in srcs:
689 for abssrc, relsrc, exact in srcs:
694 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
690 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
695 errors += 1
691 errors += 1
696
692
697 if errors:
693 if errors:
698 ui.warn(_('(consider using --after)\n'))
694 ui.warn(_('(consider using --after)\n'))
699
695
700 return errors != 0
696 return errors != 0
701
697
702 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
698 def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
703 runargs=None, appendpid=False):
699 runargs=None, appendpid=False):
704 '''Run a command as a service.'''
700 '''Run a command as a service.'''
705
701
706 def writepid(pid):
702 def writepid(pid):
707 if opts['pid_file']:
703 if opts['pid_file']:
708 if appendpid:
704 if appendpid:
709 mode = 'a'
705 mode = 'a'
710 else:
706 else:
711 mode = 'w'
707 mode = 'w'
712 fp = open(opts['pid_file'], mode)
708 fp = open(opts['pid_file'], mode)
713 fp.write(str(pid) + '\n')
709 fp.write(str(pid) + '\n')
714 fp.close()
710 fp.close()
715
711
716 if opts['daemon'] and not opts['daemon_pipefds']:
712 if opts['daemon'] and not opts['daemon_pipefds']:
717 # Signal child process startup with file removal
713 # Signal child process startup with file removal
718 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
714 lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
719 os.close(lockfd)
715 os.close(lockfd)
720 try:
716 try:
721 if not runargs:
717 if not runargs:
722 runargs = util.hgcmd() + sys.argv[1:]
718 runargs = util.hgcmd() + sys.argv[1:]
723 runargs.append('--daemon-pipefds=%s' % lockpath)
719 runargs.append('--daemon-pipefds=%s' % lockpath)
724 # Don't pass --cwd to the child process, because we've already
720 # Don't pass --cwd to the child process, because we've already
725 # changed directory.
721 # changed directory.
726 for i in xrange(1, len(runargs)):
722 for i in xrange(1, len(runargs)):
727 if runargs[i].startswith('--cwd='):
723 if runargs[i].startswith('--cwd='):
728 del runargs[i]
724 del runargs[i]
729 break
725 break
730 elif runargs[i].startswith('--cwd'):
726 elif runargs[i].startswith('--cwd'):
731 del runargs[i:i + 2]
727 del runargs[i:i + 2]
732 break
728 break
733 def condfn():
729 def condfn():
734 return not os.path.exists(lockpath)
730 return not os.path.exists(lockpath)
735 pid = util.rundetached(runargs, condfn)
731 pid = util.rundetached(runargs, condfn)
736 if pid < 0:
732 if pid < 0:
737 raise util.Abort(_('child process failed to start'))
733 raise util.Abort(_('child process failed to start'))
738 writepid(pid)
734 writepid(pid)
739 finally:
735 finally:
740 try:
736 try:
741 os.unlink(lockpath)
737 os.unlink(lockpath)
742 except OSError, e:
738 except OSError, e:
743 if e.errno != errno.ENOENT:
739 if e.errno != errno.ENOENT:
744 raise
740 raise
745 if parentfn:
741 if parentfn:
746 return parentfn(pid)
742 return parentfn(pid)
747 else:
743 else:
748 return
744 return
749
745
750 if initfn:
746 if initfn:
751 initfn()
747 initfn()
752
748
753 if not opts['daemon']:
749 if not opts['daemon']:
754 writepid(os.getpid())
750 writepid(os.getpid())
755
751
756 if opts['daemon_pipefds']:
752 if opts['daemon_pipefds']:
757 lockpath = opts['daemon_pipefds']
753 lockpath = opts['daemon_pipefds']
758 try:
754 try:
759 os.setsid()
755 os.setsid()
760 except AttributeError:
756 except AttributeError:
761 pass
757 pass
762 os.unlink(lockpath)
758 os.unlink(lockpath)
763 util.hidewindow()
759 util.hidewindow()
764 sys.stdout.flush()
760 sys.stdout.flush()
765 sys.stderr.flush()
761 sys.stderr.flush()
766
762
767 nullfd = os.open(os.devnull, os.O_RDWR)
763 nullfd = os.open(os.devnull, os.O_RDWR)
768 logfilefd = nullfd
764 logfilefd = nullfd
769 if logfile:
765 if logfile:
770 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
766 logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
771 os.dup2(nullfd, 0)
767 os.dup2(nullfd, 0)
772 os.dup2(logfilefd, 1)
768 os.dup2(logfilefd, 1)
773 os.dup2(logfilefd, 2)
769 os.dup2(logfilefd, 2)
774 if nullfd not in (0, 1, 2):
770 if nullfd not in (0, 1, 2):
775 os.close(nullfd)
771 os.close(nullfd)
776 if logfile and logfilefd not in (0, 1, 2):
772 if logfile and logfilefd not in (0, 1, 2):
777 os.close(logfilefd)
773 os.close(logfilefd)
778
774
779 if runfn:
775 if runfn:
780 return runfn()
776 return runfn()
781
777
782 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
778 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
783 """Utility function used by commands.import to import a single patch
779 """Utility function used by commands.import to import a single patch
784
780
785 This function is explicitly defined here to help the evolve extension to
781 This function is explicitly defined here to help the evolve extension to
786 wrap this part of the import logic.
782 wrap this part of the import logic.
787
783
788 The API is currently a bit ugly because it a simple code translation from
784 The API is currently a bit ugly because it a simple code translation from
789 the import command. Feel free to make it better.
785 the import command. Feel free to make it better.
790
786
791 :hunk: a patch (as a binary string)
787 :hunk: a patch (as a binary string)
792 :parents: nodes that will be parent of the created commit
788 :parents: nodes that will be parent of the created commit
793 :opts: the full dict of option passed to the import command
789 :opts: the full dict of option passed to the import command
794 :msgs: list to save commit message to.
790 :msgs: list to save commit message to.
795 (used in case we need to save it when failing)
791 (used in case we need to save it when failing)
796 :updatefunc: a function that update a repo to a given node
792 :updatefunc: a function that update a repo to a given node
797 updatefunc(<repo>, <node>)
793 updatefunc(<repo>, <node>)
798 """
794 """
799 tmpname, message, user, date, branch, nodeid, p1, p2 = \
795 tmpname, message, user, date, branch, nodeid, p1, p2 = \
800 patch.extract(ui, hunk)
796 patch.extract(ui, hunk)
801
797
802 update = not opts.get('bypass')
798 update = not opts.get('bypass')
803 strip = opts["strip"]
799 strip = opts["strip"]
804 prefix = opts["prefix"]
800 prefix = opts["prefix"]
805 sim = float(opts.get('similarity') or 0)
801 sim = float(opts.get('similarity') or 0)
806 if not tmpname:
802 if not tmpname:
807 return (None, None, False)
803 return (None, None, False)
808 msg = _('applied to working directory')
804 msg = _('applied to working directory')
809
805
810 rejects = False
806 rejects = False
811
807
812 try:
808 try:
813 cmdline_message = logmessage(ui, opts)
809 cmdline_message = logmessage(ui, opts)
814 if cmdline_message:
810 if cmdline_message:
815 # pickup the cmdline msg
811 # pickup the cmdline msg
816 message = cmdline_message
812 message = cmdline_message
817 elif message:
813 elif message:
818 # pickup the patch msg
814 # pickup the patch msg
819 message = message.strip()
815 message = message.strip()
820 else:
816 else:
821 # launch the editor
817 # launch the editor
822 message = None
818 message = None
823 ui.debug('message:\n%s\n' % message)
819 ui.debug('message:\n%s\n' % message)
824
820
825 if len(parents) == 1:
821 if len(parents) == 1:
826 parents.append(repo[nullid])
822 parents.append(repo[nullid])
827 if opts.get('exact'):
823 if opts.get('exact'):
828 if not nodeid or not p1:
824 if not nodeid or not p1:
829 raise util.Abort(_('not a Mercurial patch'))
825 raise util.Abort(_('not a Mercurial patch'))
830 p1 = repo[p1]
826 p1 = repo[p1]
831 p2 = repo[p2 or nullid]
827 p2 = repo[p2 or nullid]
832 elif p2:
828 elif p2:
833 try:
829 try:
834 p1 = repo[p1]
830 p1 = repo[p1]
835 p2 = repo[p2]
831 p2 = repo[p2]
836 # Without any options, consider p2 only if the
832 # Without any options, consider p2 only if the
837 # patch is being applied on top of the recorded
833 # patch is being applied on top of the recorded
838 # first parent.
834 # first parent.
839 if p1 != parents[0]:
835 if p1 != parents[0]:
840 p1 = parents[0]
836 p1 = parents[0]
841 p2 = repo[nullid]
837 p2 = repo[nullid]
842 except error.RepoError:
838 except error.RepoError:
843 p1, p2 = parents
839 p1, p2 = parents
844 if p2.node() == nullid:
840 if p2.node() == nullid:
845 ui.warn(_("warning: import the patch as a normal revision\n"
841 ui.warn(_("warning: import the patch as a normal revision\n"
846 "(use --exact to import the patch as a merge)\n"))
842 "(use --exact to import the patch as a merge)\n"))
847 else:
843 else:
848 p1, p2 = parents
844 p1, p2 = parents
849
845
850 n = None
846 n = None
851 if update:
847 if update:
852 repo.dirstate.beginparentchange()
848 repo.dirstate.beginparentchange()
853 if p1 != parents[0]:
849 if p1 != parents[0]:
854 updatefunc(repo, p1.node())
850 updatefunc(repo, p1.node())
855 if p2 != parents[1]:
851 if p2 != parents[1]:
856 repo.setparents(p1.node(), p2.node())
852 repo.setparents(p1.node(), p2.node())
857
853
858 if opts.get('exact') or opts.get('import_branch'):
854 if opts.get('exact') or opts.get('import_branch'):
859 repo.dirstate.setbranch(branch or 'default')
855 repo.dirstate.setbranch(branch or 'default')
860
856
861 partial = opts.get('partial', False)
857 partial = opts.get('partial', False)
862 files = set()
858 files = set()
863 try:
859 try:
864 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
860 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
865 files=files, eolmode=None, similarity=sim / 100.0)
861 files=files, eolmode=None, similarity=sim / 100.0)
866 except patch.PatchError, e:
862 except patch.PatchError, e:
867 if not partial:
863 if not partial:
868 raise util.Abort(str(e))
864 raise util.Abort(str(e))
869 if partial:
865 if partial:
870 rejects = True
866 rejects = True
871
867
872 files = list(files)
868 files = list(files)
873 if opts.get('no_commit'):
869 if opts.get('no_commit'):
874 if message:
870 if message:
875 msgs.append(message)
871 msgs.append(message)
876 else:
872 else:
877 if opts.get('exact') or p2:
873 if opts.get('exact') or p2:
878 # If you got here, you either use --force and know what
874 # If you got here, you either use --force and know what
879 # you are doing or used --exact or a merge patch while
875 # you are doing or used --exact or a merge patch while
880 # being updated to its first parent.
876 # being updated to its first parent.
881 m = None
877 m = None
882 else:
878 else:
883 m = scmutil.matchfiles(repo, files or [])
879 m = scmutil.matchfiles(repo, files or [])
884 editform = mergeeditform(repo[None], 'import.normal')
880 editform = mergeeditform(repo[None], 'import.normal')
885 if opts.get('exact'):
881 if opts.get('exact'):
886 editor = None
882 editor = None
887 else:
883 else:
888 editor = getcommiteditor(editform=editform, **opts)
884 editor = getcommiteditor(editform=editform, **opts)
889 n = repo.commit(message, opts.get('user') or user,
885 n = repo.commit(message, opts.get('user') or user,
890 opts.get('date') or date, match=m,
886 opts.get('date') or date, match=m,
891 editor=editor, force=partial)
887 editor=editor, force=partial)
892 repo.dirstate.endparentchange()
888 repo.dirstate.endparentchange()
893 else:
889 else:
894 if opts.get('exact') or opts.get('import_branch'):
890 if opts.get('exact') or opts.get('import_branch'):
895 branch = branch or 'default'
891 branch = branch or 'default'
896 else:
892 else:
897 branch = p1.branch()
893 branch = p1.branch()
898 store = patch.filestore()
894 store = patch.filestore()
899 try:
895 try:
900 files = set()
896 files = set()
901 try:
897 try:
902 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
898 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
903 files, eolmode=None)
899 files, eolmode=None)
904 except patch.PatchError, e:
900 except patch.PatchError, e:
905 raise util.Abort(str(e))
901 raise util.Abort(str(e))
906 if opts.get('exact'):
902 if opts.get('exact'):
907 editor = None
903 editor = None
908 else:
904 else:
909 editor = getcommiteditor(editform='import.bypass')
905 editor = getcommiteditor(editform='import.bypass')
910 memctx = context.makememctx(repo, (p1.node(), p2.node()),
906 memctx = context.makememctx(repo, (p1.node(), p2.node()),
911 message,
907 message,
912 opts.get('user') or user,
908 opts.get('user') or user,
913 opts.get('date') or date,
909 opts.get('date') or date,
914 branch, files, store,
910 branch, files, store,
915 editor=editor)
911 editor=editor)
916 n = memctx.commit()
912 n = memctx.commit()
917 finally:
913 finally:
918 store.close()
914 store.close()
919 if opts.get('exact') and opts.get('no_commit'):
915 if opts.get('exact') and opts.get('no_commit'):
920 # --exact with --no-commit is still useful in that it does merge
916 # --exact with --no-commit is still useful in that it does merge
921 # and branch bits
917 # and branch bits
922 ui.warn(_("warning: can't check exact import with --no-commit\n"))
918 ui.warn(_("warning: can't check exact import with --no-commit\n"))
923 elif opts.get('exact') and hex(n) != nodeid:
919 elif opts.get('exact') and hex(n) != nodeid:
924 raise util.Abort(_('patch is damaged or loses information'))
920 raise util.Abort(_('patch is damaged or loses information'))
925 if n:
921 if n:
926 # i18n: refers to a short changeset id
922 # i18n: refers to a short changeset id
927 msg = _('created %s') % short(n)
923 msg = _('created %s') % short(n)
928 return (msg, n, rejects)
924 return (msg, n, rejects)
929 finally:
925 finally:
930 os.unlink(tmpname)
926 os.unlink(tmpname)
931
927
932 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
928 def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
933 opts=None):
929 opts=None):
934 '''export changesets as hg patches.'''
930 '''export changesets as hg patches.'''
935
931
936 total = len(revs)
932 total = len(revs)
937 revwidth = max([len(str(rev)) for rev in revs])
933 revwidth = max([len(str(rev)) for rev in revs])
938 filemode = {}
934 filemode = {}
939
935
940 def single(rev, seqno, fp):
936 def single(rev, seqno, fp):
941 ctx = repo[rev]
937 ctx = repo[rev]
942 node = ctx.node()
938 node = ctx.node()
943 parents = [p.node() for p in ctx.parents() if p]
939 parents = [p.node() for p in ctx.parents() if p]
944 branch = ctx.branch()
940 branch = ctx.branch()
945 if switch_parent:
941 if switch_parent:
946 parents.reverse()
942 parents.reverse()
947
943
948 if parents:
944 if parents:
949 prev = parents[0]
945 prev = parents[0]
950 else:
946 else:
951 prev = nullid
947 prev = nullid
952
948
953 shouldclose = False
949 shouldclose = False
954 if not fp and len(template) > 0:
950 if not fp and len(template) > 0:
955 desc_lines = ctx.description().rstrip().split('\n')
951 desc_lines = ctx.description().rstrip().split('\n')
956 desc = desc_lines[0] #Commit always has a first line.
952 desc = desc_lines[0] #Commit always has a first line.
957 fp = makefileobj(repo, template, node, desc=desc, total=total,
953 fp = makefileobj(repo, template, node, desc=desc, total=total,
958 seqno=seqno, revwidth=revwidth, mode='wb',
954 seqno=seqno, revwidth=revwidth, mode='wb',
959 modemap=filemode)
955 modemap=filemode)
960 if fp != template:
956 if fp != template:
961 shouldclose = True
957 shouldclose = True
962 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
958 if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
963 repo.ui.note("%s\n" % fp.name)
959 repo.ui.note("%s\n" % fp.name)
964
960
965 if not fp:
961 if not fp:
966 write = repo.ui.write
962 write = repo.ui.write
967 else:
963 else:
968 def write(s, **kw):
964 def write(s, **kw):
969 fp.write(s)
965 fp.write(s)
970
966
971
967
972 write("# HG changeset patch\n")
968 write("# HG changeset patch\n")
973 write("# User %s\n" % ctx.user())
969 write("# User %s\n" % ctx.user())
974 write("# Date %d %d\n" % ctx.date())
970 write("# Date %d %d\n" % ctx.date())
975 write("# %s\n" % util.datestr(ctx.date()))
971 write("# %s\n" % util.datestr(ctx.date()))
976 if branch and branch != 'default':
972 if branch and branch != 'default':
977 write("# Branch %s\n" % branch)
973 write("# Branch %s\n" % branch)
978 write("# Node ID %s\n" % hex(node))
974 write("# Node ID %s\n" % hex(node))
979 write("# Parent %s\n" % hex(prev))
975 write("# Parent %s\n" % hex(prev))
980 if len(parents) > 1:
976 if len(parents) > 1:
981 write("# Parent %s\n" % hex(parents[1]))
977 write("# Parent %s\n" % hex(parents[1]))
982 write(ctx.description().rstrip())
978 write(ctx.description().rstrip())
983 write("\n\n")
979 write("\n\n")
984
980
985 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
981 for chunk, label in patch.diffui(repo, prev, node, opts=opts):
986 write(chunk, label=label)
982 write(chunk, label=label)
987
983
988 if shouldclose:
984 if shouldclose:
989 fp.close()
985 fp.close()
990
986
991 for seqno, rev in enumerate(revs):
987 for seqno, rev in enumerate(revs):
992 single(rev, seqno + 1, fp)
988 single(rev, seqno + 1, fp)
993
989
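# Editor's sketch (not part of this changeset): export() above expands the
# filename template once per revision, so 'hg-%h.patch' yields one patch file
# per rev named after its short hash.  The helper name below is hypothetical.
def _demo_export(ui, repo):
    # write revisions 1 and 2 into hg-<shorthash>.patch files in the cwd
    export(repo, [1, 2], template='hg-%h.patch',
           opts=patch.diffallopts(ui))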
994 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
990 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
995 changes=None, stat=False, fp=None, prefix='',
991 changes=None, stat=False, fp=None, prefix='',
996 listsubrepos=False):
992 listsubrepos=False):
997 '''show diff or diffstat.'''
993 '''show diff or diffstat.'''
998 if fp is None:
994 if fp is None:
999 write = ui.write
995 write = ui.write
1000 else:
996 else:
1001 def write(s, **kw):
997 def write(s, **kw):
1002 fp.write(s)
998 fp.write(s)
1003
999
1004 if stat:
1000 if stat:
1005 diffopts = diffopts.copy(context=0)
1001 diffopts = diffopts.copy(context=0)
1006 width = 80
1002 width = 80
1007 if not ui.plain():
1003 if not ui.plain():
1008 width = ui.termwidth()
1004 width = ui.termwidth()
1009 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1005 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1010 prefix=prefix)
1006 prefix=prefix)
1011 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1007 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1012 width=width,
1008 width=width,
1013 git=diffopts.git):
1009 git=diffopts.git):
1014 write(chunk, label=label)
1010 write(chunk, label=label)
1015 else:
1011 else:
1016 for chunk, label in patch.diffui(repo, node1, node2, match,
1012 for chunk, label in patch.diffui(repo, node1, node2, match,
1017 changes, diffopts, prefix=prefix):
1013 changes, diffopts, prefix=prefix):
1018 write(chunk, label=label)
1014 write(chunk, label=label)
1019
1015
1020 if listsubrepos:
1016 if listsubrepos:
1021 ctx1 = repo[node1]
1017 ctx1 = repo[node1]
1022 ctx2 = repo[node2]
1018 ctx2 = repo[node2]
1023 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1019 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1024 tempnode2 = node2
1020 tempnode2 = node2
1025 try:
1021 try:
1026 if node2 is not None:
1022 if node2 is not None:
1027 tempnode2 = ctx2.substate[subpath][1]
1023 tempnode2 = ctx2.substate[subpath][1]
1028 except KeyError:
1024 except KeyError:
1029 # A subrepo that existed in node1 was deleted between node1 and
1025 # A subrepo that existed in node1 was deleted between node1 and
1030 # node2 (inclusive). Thus, ctx2's substate won't contain that
1026 # node2 (inclusive). Thus, ctx2's substate won't contain that
1031 # subpath. The best we can do is to ignore it.
1027 # subpath. The best we can do is to ignore it.
1032 tempnode2 = None
1028 tempnode2 = None
1033 submatch = matchmod.narrowmatcher(subpath, match)
1029 submatch = matchmod.narrowmatcher(subpath, match)
1034 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1030 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1035 stat=stat, fp=fp, prefix=prefix)
1031 stat=stat, fp=fp, prefix=prefix)
1036
1032
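# Editor's sketch (hypothetical helper, not part of this changeset): the two
# modes of diffordiffstat() above.  node2=None means "working directory",
# the same convention patch.diff() uses.
def _demo_diffordiffstat(ui, repo):
    m = scmutil.matchall(repo)
    diffopts = patch.diffallopts(ui)
    # summary of what changed since the working directory parent
    diffordiffstat(ui, repo, diffopts, repo['.'].node(), None, m, stat=True)
    # the same range as a full patch
    diffordiffstat(ui, repo, diffopts, repo['.'].node(), None, m, stat=False)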
1037 class changeset_printer(object):
1033 class changeset_printer(object):
1038 '''show changeset information when templating not requested.'''
1034 '''show changeset information when templating not requested.'''
1039
1035
1040 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1036 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1041 self.ui = ui
1037 self.ui = ui
1042 self.repo = repo
1038 self.repo = repo
1043 self.buffered = buffered
1039 self.buffered = buffered
1044 self.matchfn = matchfn
1040 self.matchfn = matchfn
1045 self.diffopts = diffopts
1041 self.diffopts = diffopts
1046 self.header = {}
1042 self.header = {}
1047 self.hunk = {}
1043 self.hunk = {}
1048 self.lastheader = None
1044 self.lastheader = None
1049 self.footer = None
1045 self.footer = None
1050
1046
1051 def flush(self, rev):
1047 def flush(self, rev):
1052 if rev in self.header:
1048 if rev in self.header:
1053 h = self.header[rev]
1049 h = self.header[rev]
1054 if h != self.lastheader:
1050 if h != self.lastheader:
1055 self.lastheader = h
1051 self.lastheader = h
1056 self.ui.write(h)
1052 self.ui.write(h)
1057 del self.header[rev]
1053 del self.header[rev]
1058 if rev in self.hunk:
1054 if rev in self.hunk:
1059 self.ui.write(self.hunk[rev])
1055 self.ui.write(self.hunk[rev])
1060 del self.hunk[rev]
1056 del self.hunk[rev]
1061 return 1
1057 return 1
1062 return 0
1058 return 0
1063
1059
1064 def close(self):
1060 def close(self):
1065 if self.footer:
1061 if self.footer:
1066 self.ui.write(self.footer)
1062 self.ui.write(self.footer)
1067
1063
1068 def show(self, ctx, copies=None, matchfn=None, **props):
1064 def show(self, ctx, copies=None, matchfn=None, **props):
1069 if self.buffered:
1065 if self.buffered:
1070 self.ui.pushbuffer()
1066 self.ui.pushbuffer()
1071 self._show(ctx, copies, matchfn, props)
1067 self._show(ctx, copies, matchfn, props)
1072 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1068 self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
1073 else:
1069 else:
1074 self._show(ctx, copies, matchfn, props)
1070 self._show(ctx, copies, matchfn, props)
1075
1071
1076 def _show(self, ctx, copies, matchfn, props):
1072 def _show(self, ctx, copies, matchfn, props):
1077 '''show a single changeset or file revision'''
1073 '''show a single changeset or file revision'''
1078 changenode = ctx.node()
1074 changenode = ctx.node()
1079 rev = ctx.rev()
1075 rev = ctx.rev()
1080
1076
1081 if self.ui.quiet:
1077 if self.ui.quiet:
1082 self.ui.write("%d:%s\n" % (rev, short(changenode)),
1078 self.ui.write("%d:%s\n" % (rev, short(changenode)),
1083 label='log.node')
1079 label='log.node')
1084 return
1080 return
1085
1081
1086 log = self.repo.changelog
1082 log = self.repo.changelog
1087 date = util.datestr(ctx.date())
1083 date = util.datestr(ctx.date())
1088
1084
1089 if self.ui.debugflag:
1085 if self.ui.debugflag:
1090 hexfunc = hex
1086 hexfunc = hex
1091 else:
1087 else:
1092 hexfunc = short
1088 hexfunc = short
1093
1089
1094 parents = [(p, hexfunc(log.node(p)))
1090 parents = [(p, hexfunc(log.node(p)))
1095 for p in self._meaningful_parentrevs(log, rev)]
1091 for p in self._meaningful_parentrevs(log, rev)]
1096
1092
1097 # i18n: column positioning for "hg log"
1093 # i18n: column positioning for "hg log"
1098 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
1094 self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)),
1099 label='log.changeset changeset.%s' % ctx.phasestr())
1095 label='log.changeset changeset.%s' % ctx.phasestr())
1100
1096
1101 # branches are shown first before any other names due to backwards
1097 # branches are shown first before any other names due to backwards
1102 # compatibility
1098 # compatibility
1103 branch = ctx.branch()
1099 branch = ctx.branch()
1104 # don't show the default branch name
1100 # don't show the default branch name
1105 if branch != 'default':
1101 if branch != 'default':
1106 # i18n: column positioning for "hg log"
1102 # i18n: column positioning for "hg log"
1107 self.ui.write(_("branch: %s\n") % branch,
1103 self.ui.write(_("branch: %s\n") % branch,
1108 label='log.branch')
1104 label='log.branch')
1109
1105
1110 for name, ns in self.repo.names.iteritems():
1106 for name, ns in self.repo.names.iteritems():
1111 # the 'branches' namespace has special logic handled above, so we just
1107 # the 'branches' namespace has special logic handled above, so we just
1112 # skip it here
1108 # skip it here
1113 if name == 'branches':
1109 if name == 'branches':
1114 continue
1110 continue
1115 # we will use the templatename as the color name since those two
1111 # we will use the templatename as the color name since those two
1116 # should be the same
1112 # should be the same
1117 for name in ns.names(self.repo, changenode):
1113 for name in ns.names(self.repo, changenode):
1118 self.ui.write(ns.logfmt % name,
1114 self.ui.write(ns.logfmt % name,
1119 label='log.%s' % ns.colorname)
1115 label='log.%s' % ns.colorname)
1120 if self.ui.debugflag:
1116 if self.ui.debugflag:
1121 # i18n: column positioning for "hg log"
1117 # i18n: column positioning for "hg log"
1122 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
1118 self.ui.write(_("phase: %s\n") % _(ctx.phasestr()),
1123 label='log.phase')
1119 label='log.phase')
1124 for parent in parents:
1120 for parent in parents:
1125 label = 'log.parent changeset.%s' % self.repo[parent[0]].phasestr()
1121 label = 'log.parent changeset.%s' % self.repo[parent[0]].phasestr()
1126 # i18n: column positioning for "hg log"
1122 # i18n: column positioning for "hg log"
1127 self.ui.write(_("parent: %d:%s\n") % parent,
1123 self.ui.write(_("parent: %d:%s\n") % parent,
1128 label=label)
1124 label=label)
1129
1125
1130 if self.ui.debugflag:
1126 if self.ui.debugflag:
1131 mnode = ctx.manifestnode()
1127 mnode = ctx.manifestnode()
1132 # i18n: column positioning for "hg log"
1128 # i18n: column positioning for "hg log"
1133 self.ui.write(_("manifest: %d:%s\n") %
1129 self.ui.write(_("manifest: %d:%s\n") %
1134 (self.repo.manifest.rev(mnode), hex(mnode)),
1130 (self.repo.manifest.rev(mnode), hex(mnode)),
1135 label='ui.debug log.manifest')
1131 label='ui.debug log.manifest')
1136 # i18n: column positioning for "hg log"
1132 # i18n: column positioning for "hg log"
1137 self.ui.write(_("user: %s\n") % ctx.user(),
1133 self.ui.write(_("user: %s\n") % ctx.user(),
1138 label='log.user')
1134 label='log.user')
1139 # i18n: column positioning for "hg log"
1135 # i18n: column positioning for "hg log"
1140 self.ui.write(_("date: %s\n") % date,
1136 self.ui.write(_("date: %s\n") % date,
1141 label='log.date')
1137 label='log.date')
1142
1138
1143 if self.ui.debugflag:
1139 if self.ui.debugflag:
1144 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
1140 files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
1145 for key, value in zip([# i18n: column positioning for "hg log"
1141 for key, value in zip([# i18n: column positioning for "hg log"
1146 _("files:"),
1142 _("files:"),
1147 # i18n: column positioning for "hg log"
1143 # i18n: column positioning for "hg log"
1148 _("files+:"),
1144 _("files+:"),
1149 # i18n: column positioning for "hg log"
1145 # i18n: column positioning for "hg log"
1150 _("files-:")], files):
1146 _("files-:")], files):
1151 if value:
1147 if value:
1152 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1148 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1153 label='ui.debug log.files')
1149 label='ui.debug log.files')
1154 elif ctx.files() and self.ui.verbose:
1150 elif ctx.files() and self.ui.verbose:
1155 # i18n: column positioning for "hg log"
1151 # i18n: column positioning for "hg log"
1156 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1152 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1157 label='ui.note log.files')
1153 label='ui.note log.files')
1158 if copies and self.ui.verbose:
1154 if copies and self.ui.verbose:
1159 copies = ['%s (%s)' % c for c in copies]
1155 copies = ['%s (%s)' % c for c in copies]
1160 # i18n: column positioning for "hg log"
1156 # i18n: column positioning for "hg log"
1161 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1157 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1162 label='ui.note log.copies')
1158 label='ui.note log.copies')
1163
1159
1164 extra = ctx.extra()
1160 extra = ctx.extra()
1165 if extra and self.ui.debugflag:
1161 if extra and self.ui.debugflag:
1166 for key, value in sorted(extra.items()):
1162 for key, value in sorted(extra.items()):
1167 # i18n: column positioning for "hg log"
1163 # i18n: column positioning for "hg log"
1168 self.ui.write(_("extra: %s=%s\n")
1164 self.ui.write(_("extra: %s=%s\n")
1169 % (key, value.encode('string_escape')),
1165 % (key, value.encode('string_escape')),
1170 label='ui.debug log.extra')
1166 label='ui.debug log.extra')
1171
1167
1172 description = ctx.description().strip()
1168 description = ctx.description().strip()
1173 if description:
1169 if description:
1174 if self.ui.verbose:
1170 if self.ui.verbose:
1175 self.ui.write(_("description:\n"),
1171 self.ui.write(_("description:\n"),
1176 label='ui.note log.description')
1172 label='ui.note log.description')
1177 self.ui.write(description,
1173 self.ui.write(description,
1178 label='ui.note log.description')
1174 label='ui.note log.description')
1179 self.ui.write("\n\n")
1175 self.ui.write("\n\n")
1180 else:
1176 else:
1181 # i18n: column positioning for "hg log"
1177 # i18n: column positioning for "hg log"
1182 self.ui.write(_("summary: %s\n") %
1178 self.ui.write(_("summary: %s\n") %
1183 description.splitlines()[0],
1179 description.splitlines()[0],
1184 label='log.summary')
1180 label='log.summary')
1185 self.ui.write("\n")
1181 self.ui.write("\n")
1186
1182
1187 self.showpatch(changenode, matchfn)
1183 self.showpatch(changenode, matchfn)
1188
1184
1189 def showpatch(self, node, matchfn):
1185 def showpatch(self, node, matchfn):
1190 if not matchfn:
1186 if not matchfn:
1191 matchfn = self.matchfn
1187 matchfn = self.matchfn
1192 if matchfn:
1188 if matchfn:
1193 stat = self.diffopts.get('stat')
1189 stat = self.diffopts.get('stat')
1194 diff = self.diffopts.get('patch')
1190 diff = self.diffopts.get('patch')
1195 diffopts = patch.diffallopts(self.ui, self.diffopts)
1191 diffopts = patch.diffallopts(self.ui, self.diffopts)
1196 prev = self.repo.changelog.parents(node)[0]
1192 prev = self.repo.changelog.parents(node)[0]
1197 if stat:
1193 if stat:
1198 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1194 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1199 match=matchfn, stat=True)
1195 match=matchfn, stat=True)
1200 if diff:
1196 if diff:
1201 if stat:
1197 if stat:
1202 self.ui.write("\n")
1198 self.ui.write("\n")
1203 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1199 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1204 match=matchfn, stat=False)
1200 match=matchfn, stat=False)
1205 self.ui.write("\n")
1201 self.ui.write("\n")
1206
1202
1207 def _meaningful_parentrevs(self, log, rev):
1203 def _meaningful_parentrevs(self, log, rev):
1208 """Return list of meaningful (or all if debug) parentrevs for rev.
1204 """Return list of meaningful (or all if debug) parentrevs for rev.
1209
1205
1210 For merges (two non-nullrev revisions) both parents are meaningful.
1206 For merges (two non-nullrev revisions) both parents are meaningful.
1211 Otherwise the first parent revision is considered meaningful if it
1207 Otherwise the first parent revision is considered meaningful if it
1212 is not the preceding revision.
1208 is not the preceding revision.
1213 """
1209 """
1214 parents = log.parentrevs(rev)
1210 parents = log.parentrevs(rev)
1215 if not self.ui.debugflag and parents[1] == nullrev:
1211 if not self.ui.debugflag and parents[1] == nullrev:
1216 if parents[0] >= rev - 1:
1212 if parents[0] >= rev - 1:
1217 parents = []
1213 parents = []
1218 else:
1214 else:
1219 parents = [parents[0]]
1215 parents = [parents[0]]
1220 return parents
1216 return parents
1221
1217
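# Editor's note (illustrative, not part of this changeset): with the rule in
# _meaningful_parentrevs() above, a changeset whose only parent is the
# immediately preceding revision shows no "parent:" line at all, e.g.
# parentrevs(5) == (4, -1) -> [], while parentrevs(5) == (2, -1) -> [2]
# and a merge such as (3, 4) keeps both parents.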
1222 class jsonchangeset(changeset_printer):
1218 class jsonchangeset(changeset_printer):
1223 '''format changeset information.'''
1219 '''format changeset information.'''
1224
1220
1225 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1221 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1226 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1222 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1227 self.cache = {}
1223 self.cache = {}
1228 self._first = True
1224 self._first = True
1229
1225
1230 def close(self):
1226 def close(self):
1231 if not self._first:
1227 if not self._first:
1232 self.ui.write("\n]\n")
1228 self.ui.write("\n]\n")
1233 else:
1229 else:
1234 self.ui.write("[]\n")
1230 self.ui.write("[]\n")
1235
1231
1236 def _show(self, ctx, copies, matchfn, props):
1232 def _show(self, ctx, copies, matchfn, props):
1237 '''show a single changeset or file revision'''
1233 '''show a single changeset or file revision'''
1238 hexnode = hex(ctx.node())
1234 hexnode = hex(ctx.node())
1239 rev = ctx.rev()
1235 rev = ctx.rev()
1240 j = encoding.jsonescape
1236 j = encoding.jsonescape
1241
1237
1242 if self._first:
1238 if self._first:
1243 self.ui.write("[\n {")
1239 self.ui.write("[\n {")
1244 self._first = False
1240 self._first = False
1245 else:
1241 else:
1246 self.ui.write(",\n {")
1242 self.ui.write(",\n {")
1247
1243
1248 if self.ui.quiet:
1244 if self.ui.quiet:
1249 self.ui.write('\n "rev": %d' % rev)
1245 self.ui.write('\n "rev": %d' % rev)
1250 self.ui.write(',\n "node": "%s"' % hexnode)
1246 self.ui.write(',\n "node": "%s"' % hexnode)
1251 self.ui.write('\n }')
1247 self.ui.write('\n }')
1252 return
1248 return
1253
1249
1254 self.ui.write('\n "rev": %d' % rev)
1250 self.ui.write('\n "rev": %d' % rev)
1255 self.ui.write(',\n "node": "%s"' % hexnode)
1251 self.ui.write(',\n "node": "%s"' % hexnode)
1256 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1252 self.ui.write(',\n "branch": "%s"' % j(ctx.branch()))
1257 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1253 self.ui.write(',\n "phase": "%s"' % ctx.phasestr())
1258 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1254 self.ui.write(',\n "user": "%s"' % j(ctx.user()))
1259 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1255 self.ui.write(',\n "date": [%d, %d]' % ctx.date())
1260 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1256 self.ui.write(',\n "desc": "%s"' % j(ctx.description()))
1261
1257
1262 self.ui.write(',\n "bookmarks": [%s]' %
1258 self.ui.write(',\n "bookmarks": [%s]' %
1263 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1259 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1264 self.ui.write(',\n "tags": [%s]' %
1260 self.ui.write(',\n "tags": [%s]' %
1265 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1261 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1266 self.ui.write(',\n "parents": [%s]' %
1262 self.ui.write(',\n "parents": [%s]' %
1267 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1263 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1268
1264
1269 if self.ui.debugflag:
1265 if self.ui.debugflag:
1270 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1266 self.ui.write(',\n "manifest": "%s"' % hex(ctx.manifestnode()))
1271
1267
1272 self.ui.write(',\n "extra": {%s}' %
1268 self.ui.write(',\n "extra": {%s}' %
1273 ", ".join('"%s": "%s"' % (j(k), j(v))
1269 ", ".join('"%s": "%s"' % (j(k), j(v))
1274 for k, v in ctx.extra().items()))
1270 for k, v in ctx.extra().items()))
1275
1271
1276 files = ctx.p1().status(ctx)
1272 files = ctx.p1().status(ctx)
1277 self.ui.write(',\n "modified": [%s]' %
1273 self.ui.write(',\n "modified": [%s]' %
1278 ", ".join('"%s"' % j(f) for f in files[0]))
1274 ", ".join('"%s"' % j(f) for f in files[0]))
1279 self.ui.write(',\n "added": [%s]' %
1275 self.ui.write(',\n "added": [%s]' %
1280 ", ".join('"%s"' % j(f) for f in files[1]))
1276 ", ".join('"%s"' % j(f) for f in files[1]))
1281 self.ui.write(',\n "removed": [%s]' %
1277 self.ui.write(',\n "removed": [%s]' %
1282 ", ".join('"%s"' % j(f) for f in files[2]))
1278 ", ".join('"%s"' % j(f) for f in files[2]))
1283
1279
1284 elif self.ui.verbose:
1280 elif self.ui.verbose:
1285 self.ui.write(',\n "files": [%s]' %
1281 self.ui.write(',\n "files": [%s]' %
1286 ", ".join('"%s"' % j(f) for f in ctx.files()))
1282 ", ".join('"%s"' % j(f) for f in ctx.files()))
1287
1283
1288 if copies:
1284 if copies:
1289 self.ui.write(',\n "copies": {%s}' %
1285 self.ui.write(',\n "copies": {%s}' %
1290 ", ".join('"%s": "%s"' % (j(k), j(v))
1286 ", ".join('"%s": "%s"' % (j(k), j(v))
1291 for k, v in copies))
1287 for k, v in copies))
1292
1288
1293 matchfn = self.matchfn
1289 matchfn = self.matchfn
1294 if matchfn:
1290 if matchfn:
1295 stat = self.diffopts.get('stat')
1291 stat = self.diffopts.get('stat')
1296 diff = self.diffopts.get('patch')
1292 diff = self.diffopts.get('patch')
1297 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1293 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1298 node, prev = ctx.node(), ctx.p1().node()
1294 node, prev = ctx.node(), ctx.p1().node()
1299 if stat:
1295 if stat:
1300 self.ui.pushbuffer()
1296 self.ui.pushbuffer()
1301 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1297 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1302 match=matchfn, stat=True)
1298 match=matchfn, stat=True)
1303 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1299 self.ui.write(',\n "diffstat": "%s"' % j(self.ui.popbuffer()))
1304 if diff:
1300 if diff:
1305 self.ui.pushbuffer()
1301 self.ui.pushbuffer()
1306 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1302 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1307 match=matchfn, stat=False)
1303 match=matchfn, stat=False)
1308 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1304 self.ui.write(',\n "diff": "%s"' % j(self.ui.popbuffer()))
1309
1305
1310 self.ui.write("\n }")
1306 self.ui.write("\n }")
1311
1307
1312 class changeset_templater(changeset_printer):
1308 class changeset_templater(changeset_printer):
1313 '''format changeset information.'''
1309 '''format changeset information.'''
1314
1310
1315 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1311 def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
1316 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1312 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1317 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1313 formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
1318 defaulttempl = {
1314 defaulttempl = {
1319 'parent': '{rev}:{node|formatnode} ',
1315 'parent': '{rev}:{node|formatnode} ',
1320 'manifest': '{rev}:{node|formatnode}',
1316 'manifest': '{rev}:{node|formatnode}',
1321 'file_copy': '{name} ({source})',
1317 'file_copy': '{name} ({source})',
1322 'extra': '{key}={value|stringescape}'
1318 'extra': '{key}={value|stringescape}'
1323 }
1319 }
1324 # filecopy is preserved for compatibility reasons
1320 # filecopy is preserved for compatibility reasons
1325 defaulttempl['filecopy'] = defaulttempl['file_copy']
1321 defaulttempl['filecopy'] = defaulttempl['file_copy']
1326 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1322 self.t = templater.templater(mapfile, {'formatnode': formatnode},
1327 cache=defaulttempl)
1323 cache=defaulttempl)
1328 if tmpl:
1324 if tmpl:
1329 self.t.cache['changeset'] = tmpl
1325 self.t.cache['changeset'] = tmpl
1330
1326
1331 self.cache = {}
1327 self.cache = {}
1332
1328
1333 def _meaningful_parentrevs(self, ctx):
1329 def _meaningful_parentrevs(self, ctx):
1334 """Return list of meaningful (or all if debug) parentrevs for rev.
1330 """Return list of meaningful (or all if debug) parentrevs for rev.
1335 """
1331 """
1336 parents = ctx.parents()
1332 parents = ctx.parents()
1337 if len(parents) > 1:
1333 if len(parents) > 1:
1338 return parents
1334 return parents
1339 if self.ui.debugflag:
1335 if self.ui.debugflag:
1340 return [parents[0], self.repo['null']]
1336 return [parents[0], self.repo['null']]
1341 if parents[0].rev() >= ctx.rev() - 1:
1337 if parents[0].rev() >= ctx.rev() - 1:
1342 return []
1338 return []
1343 return parents
1339 return parents
1344
1340
1345 def _show(self, ctx, copies, matchfn, props):
1341 def _show(self, ctx, copies, matchfn, props):
1346 '''show a single changeset or file revision'''
1342 '''show a single changeset or file revision'''
1347
1343
1348 showlist = templatekw.showlist
1344 showlist = templatekw.showlist
1349
1345
1350 # showparents() behaviour depends on ui trace level which
1346 # showparents() behaviour depends on ui trace level which
1351 # causes unexpected behaviours at templating level and makes
1347 # causes unexpected behaviours at templating level and makes
1352 # it harder to extract it into a standalone function. Its
1348 # it harder to extract it into a standalone function. Its
1353 # behaviour cannot be changed so leave it here for now.
1349 # behaviour cannot be changed so leave it here for now.
1354 def showparents(**args):
1350 def showparents(**args):
1355 ctx = args['ctx']
1351 ctx = args['ctx']
1356 parents = [[('rev', p.rev()),
1352 parents = [[('rev', p.rev()),
1357 ('node', p.hex()),
1353 ('node', p.hex()),
1358 ('phase', p.phasestr())]
1354 ('phase', p.phasestr())]
1359 for p in self._meaningful_parentrevs(ctx)]
1355 for p in self._meaningful_parentrevs(ctx)]
1360 return showlist('parent', parents, **args)
1356 return showlist('parent', parents, **args)
1361
1357
1362 props = props.copy()
1358 props = props.copy()
1363 props.update(templatekw.keywords)
1359 props.update(templatekw.keywords)
1364 props['parents'] = showparents
1360 props['parents'] = showparents
1365 props['templ'] = self.t
1361 props['templ'] = self.t
1366 props['ctx'] = ctx
1362 props['ctx'] = ctx
1367 props['repo'] = self.repo
1363 props['repo'] = self.repo
1368 props['revcache'] = {'copies': copies}
1364 props['revcache'] = {'copies': copies}
1369 props['cache'] = self.cache
1365 props['cache'] = self.cache
1370
1366
1371 # find correct templates for current mode
1367 # find correct templates for current mode
1372
1368
1373 tmplmodes = [
1369 tmplmodes = [
1374 (True, None),
1370 (True, None),
1375 (self.ui.verbose, 'verbose'),
1371 (self.ui.verbose, 'verbose'),
1376 (self.ui.quiet, 'quiet'),
1372 (self.ui.quiet, 'quiet'),
1377 (self.ui.debugflag, 'debug'),
1373 (self.ui.debugflag, 'debug'),
1378 ]
1374 ]
1379
1375
1380 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1376 types = {'header': '', 'footer':'', 'changeset': 'changeset'}
1381 for mode, postfix in tmplmodes:
1377 for mode, postfix in tmplmodes:
1382 for type in types:
1378 for type in types:
1383 cur = postfix and ('%s_%s' % (type, postfix)) or type
1379 cur = postfix and ('%s_%s' % (type, postfix)) or type
1384 if mode and cur in self.t:
1380 if mode and cur in self.t:
1385 types[type] = cur
1381 types[type] = cur
1386
1382
1387 try:
1383 try:
1388
1384
1389 # write header
1385 # write header
1390 if types['header']:
1386 if types['header']:
1391 h = templater.stringify(self.t(types['header'], **props))
1387 h = templater.stringify(self.t(types['header'], **props))
1392 if self.buffered:
1388 if self.buffered:
1393 self.header[ctx.rev()] = h
1389 self.header[ctx.rev()] = h
1394 else:
1390 else:
1395 if self.lastheader != h:
1391 if self.lastheader != h:
1396 self.lastheader = h
1392 self.lastheader = h
1397 self.ui.write(h)
1393 self.ui.write(h)
1398
1394
1399 # write changeset metadata, then patch if requested
1395 # write changeset metadata, then patch if requested
1400 key = types['changeset']
1396 key = types['changeset']
1401 self.ui.write(templater.stringify(self.t(key, **props)))
1397 self.ui.write(templater.stringify(self.t(key, **props)))
1402 self.showpatch(ctx.node(), matchfn)
1398 self.showpatch(ctx.node(), matchfn)
1403
1399
1404 if types['footer']:
1400 if types['footer']:
1405 if not self.footer:
1401 if not self.footer:
1406 self.footer = templater.stringify(self.t(types['footer'],
1402 self.footer = templater.stringify(self.t(types['footer'],
1407 **props))
1403 **props))
1408
1404
1409 except KeyError, inst:
1405 except KeyError, inst:
1410 msg = _("%s: no key named '%s'")
1406 msg = _("%s: no key named '%s'")
1411 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1407 raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
1412 except SyntaxError, inst:
1408 except SyntaxError, inst:
1413 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1409 raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1414
1410
1415 def gettemplate(ui, tmpl, style):
1411 def gettemplate(ui, tmpl, style):
1416 """
1412 """
1417 Find the template matching the given template spec or style.
1413 Find the template matching the given template spec or style.
1418 """
1414 """
1419
1415
1420 # ui settings
1416 # ui settings
1421 if not tmpl and not style: # templates are stronger than style
1417 if not tmpl and not style: # templates are stronger than style
1422 tmpl = ui.config('ui', 'logtemplate')
1418 tmpl = ui.config('ui', 'logtemplate')
1423 if tmpl:
1419 if tmpl:
1424 try:
1420 try:
1425 tmpl = templater.parsestring(tmpl)
1421 tmpl = templater.parsestring(tmpl)
1426 except SyntaxError:
1422 except SyntaxError:
1427 tmpl = templater.parsestring(tmpl, quoted=False)
1423 tmpl = templater.parsestring(tmpl, quoted=False)
1428 return tmpl, None
1424 return tmpl, None
1429 else:
1425 else:
1430 style = util.expandpath(ui.config('ui', 'style', ''))
1426 style = util.expandpath(ui.config('ui', 'style', ''))
1431
1427
1432 if not tmpl and style:
1428 if not tmpl and style:
1433 mapfile = style
1429 mapfile = style
1434 if not os.path.split(mapfile)[0]:
1430 if not os.path.split(mapfile)[0]:
1435 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1431 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1436 or templater.templatepath(mapfile))
1432 or templater.templatepath(mapfile))
1437 if mapname:
1433 if mapname:
1438 mapfile = mapname
1434 mapfile = mapname
1439 return None, mapfile
1435 return None, mapfile
1440
1436
1441 if not tmpl:
1437 if not tmpl:
1442 return None, None
1438 return None, None
1443
1439
1444 # looks like a literal template?
1440 # looks like a literal template?
1445 if '{' in tmpl:
1441 if '{' in tmpl:
1446 return tmpl, None
1442 return tmpl, None
1447
1443
1448 # perhaps a stock style?
1444 # perhaps a stock style?
1449 if not os.path.split(tmpl)[0]:
1445 if not os.path.split(tmpl)[0]:
1450 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1446 mapname = (templater.templatepath('map-cmdline.' + tmpl)
1451 or templater.templatepath(tmpl))
1447 or templater.templatepath(tmpl))
1452 if mapname and os.path.isfile(mapname):
1448 if mapname and os.path.isfile(mapname):
1453 return None, mapname
1449 return None, mapname
1454
1450
1455 # perhaps it's a reference to [templates]
1451 # perhaps it's a reference to [templates]
1456 t = ui.config('templates', tmpl)
1452 t = ui.config('templates', tmpl)
1457 if t:
1453 if t:
1458 try:
1454 try:
1459 tmpl = templater.parsestring(t)
1455 tmpl = templater.parsestring(t)
1460 except SyntaxError:
1456 except SyntaxError:
1461 tmpl = templater.parsestring(t, quoted=False)
1457 tmpl = templater.parsestring(t, quoted=False)
1462 return tmpl, None
1458 return tmpl, None
1463
1459
1464 if tmpl == 'list':
1460 if tmpl == 'list':
1465 ui.write(_("available styles: %s\n") % templater.stylelist())
1461 ui.write(_("available styles: %s\n") % templater.stylelist())
1466 raise util.Abort(_("specify a template"))
1462 raise util.Abort(_("specify a template"))
1467
1463
1468 # perhaps it's a path to a map or a template
1464 # perhaps it's a path to a map or a template
1469 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1465 if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
1470 # is it a mapfile for a style?
1466 # is it a mapfile for a style?
1471 if os.path.basename(tmpl).startswith("map-"):
1467 if os.path.basename(tmpl).startswith("map-"):
1472 return None, os.path.realpath(tmpl)
1468 return None, os.path.realpath(tmpl)
1473 tmpl = open(tmpl).read()
1469 tmpl = open(tmpl).read()
1474 return tmpl, None
1470 return tmpl, None
1475
1471
1476 # constant string?
1472 # constant string?
1477 return tmpl, None
1473 return tmpl, None
1478
1474
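# Editor's sketch (not part of this changeset): gettemplate() returns either a
# literal template string or a path to a style map file, never both.  The
# 'compact' style below is assumed to be one of the stock map-cmdline styles.
def _demo_gettemplate(ui):
    tmpl, mapfile = gettemplate(ui, '{rev}:{node|short}\n', None)
    assert tmpl and mapfile is None      # literal template, spotted by the '{'
    tmpl, mapfile = gettemplate(ui, None, 'compact')
    assert tmpl is None and mapfile      # resolved to a map-cmdline.compact path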
1479 def show_changeset(ui, repo, opts, buffered=False):
1475 def show_changeset(ui, repo, opts, buffered=False):
1480 """show one changeset using template or regular display.
1476 """show one changeset using template or regular display.
1481
1477
1482 Display format will be the first non-empty hit of:
1478 Display format will be the first non-empty hit of:
1483 1. option 'template'
1479 1. option 'template'
1484 2. option 'style'
1480 2. option 'style'
1485 3. [ui] setting 'logtemplate'
1481 3. [ui] setting 'logtemplate'
1486 4. [ui] setting 'style'
1482 4. [ui] setting 'style'
1487 If all of these values are either unset or the empty string,
1483 If all of these values are either unset or the empty string,
1488 regular display via changeset_printer() is done.
1484 regular display via changeset_printer() is done.
1489 """
1485 """
1490 # options
1486 # options
1491 matchfn = None
1487 matchfn = None
1492 if opts.get('patch') or opts.get('stat'):
1488 if opts.get('patch') or opts.get('stat'):
1493 matchfn = scmutil.matchall(repo)
1489 matchfn = scmutil.matchall(repo)
1494
1490
1495 if opts.get('template') == 'json':
1491 if opts.get('template') == 'json':
1496 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1492 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1497
1493
1498 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1494 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1499
1495
1500 if not tmpl and not mapfile:
1496 if not tmpl and not mapfile:
1501 return changeset_printer(ui, repo, matchfn, opts, buffered)
1497 return changeset_printer(ui, repo, matchfn, opts, buffered)
1502
1498
1503 try:
1499 try:
1504 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1500 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1505 buffered)
1501 buffered)
1506 except SyntaxError, inst:
1502 except SyntaxError, inst:
1507 raise util.Abort(inst.args[0])
1503 raise util.Abort(inst.args[0])
1508 return t
1504 return t
1509
1505
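# Editor's sketch (hypothetical helper): the usual calling pattern for the
# displayer built above -- create it once, show() every ctx, then close().
# Passing {'template': 'json'} instead would select the jsonchangeset printer.
def _demo_show_changeset(ui, repo):
    displayer = show_changeset(ui, repo,
                               {'template': '{rev} {desc|firstline}\n'})
    for rev in repo.revs('all()'):
        displayer.show(repo[rev])
    displayer.close()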
1510 def showmarker(ui, marker):
1506 def showmarker(ui, marker):
1511 """utility function to display obsolescence marker in a readable way
1507 """utility function to display obsolescence marker in a readable way
1512
1508
1513 To be used by debug function."""
1509 To be used by debug function."""
1514 ui.write(hex(marker.precnode()))
1510 ui.write(hex(marker.precnode()))
1515 for repl in marker.succnodes():
1511 for repl in marker.succnodes():
1516 ui.write(' ')
1512 ui.write(' ')
1517 ui.write(hex(repl))
1513 ui.write(hex(repl))
1518 ui.write(' %X ' % marker.flags())
1514 ui.write(' %X ' % marker.flags())
1519 parents = marker.parentnodes()
1515 parents = marker.parentnodes()
1520 if parents is not None:
1516 if parents is not None:
1521 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1517 ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
1522 ui.write('(%s) ' % util.datestr(marker.date()))
1518 ui.write('(%s) ' % util.datestr(marker.date()))
1523 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1519 ui.write('{%s}' % (', '.join('%r: %r' % t for t in
1524 sorted(marker.metadata().items())
1520 sorted(marker.metadata().items())
1525 if t[0] != 'date')))
1521 if t[0] != 'date')))
1526 ui.write('\n')
1522 ui.write('\n')
1527
1523
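# Editor's sketch (not part of this changeset): showmarker() is meant for
# debug commands; something along these lines dumps every marker in the repo,
# assuming obsolete.getmarkers() is available in this version.
def _demo_showmarkers(ui, repo):
    for marker in obsolete.getmarkers(repo):
        showmarker(ui, marker)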
1528 def finddate(ui, repo, date):
1524 def finddate(ui, repo, date):
1529 """Find the tipmost changeset that matches the given date spec"""
1525 """Find the tipmost changeset that matches the given date spec"""
1530
1526
1531 df = util.matchdate(date)
1527 df = util.matchdate(date)
1532 m = scmutil.matchall(repo)
1528 m = scmutil.matchall(repo)
1533 results = {}
1529 results = {}
1534
1530
1535 def prep(ctx, fns):
1531 def prep(ctx, fns):
1536 d = ctx.date()
1532 d = ctx.date()
1537 if df(d[0]):
1533 if df(d[0]):
1538 results[ctx.rev()] = d
1534 results[ctx.rev()] = d
1539
1535
1540 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1536 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1541 rev = ctx.rev()
1537 rev = ctx.rev()
1542 if rev in results:
1538 if rev in results:
1543 ui.status(_("found revision %s from %s\n") %
1539 ui.status(_("found revision %s from %s\n") %
1544 (rev, util.datestr(results[rev])))
1540 (rev, util.datestr(results[rev])))
1545 return str(rev)
1541 return str(rev)
1546
1542
1547 raise util.Abort(_("revision matching date not found"))
1543 raise util.Abort(_("revision matching date not found"))
1548
1544
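# Editor's sketch (hypothetical helper): finddate() either returns the
# matching revision number as a string or raises Abort, so callers can use
# the result directly as a revision argument.
def _demo_finddate(ui, repo):
    rev = finddate(ui, repo, '2015-02-01 to 2015-02-28')   # any date spec
    return repo[rev]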
1549 def increasingwindows(windowsize=8, sizelimit=512):
1545 def increasingwindows(windowsize=8, sizelimit=512):
1550 while True:
1546 while True:
1551 yield windowsize
1547 yield windowsize
1552 if windowsize < sizelimit:
1548 if windowsize < sizelimit:
1553 windowsize *= 2
1549 windowsize *= 2
1554
1550
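# Editor's sketch (not part of this changeset): the generator above doubles
# the window size from 8 until it reaches the 512 cap and then repeats it,
# i.e. 8, 16, 32, 64, 128, 256, 512, 512, ...
def _demo_increasingwindows():
    sizes = []
    for size in increasingwindows():
        sizes.append(size)
        if len(sizes) == 8:
            break
    return sizes   # [8, 16, 32, 64, 128, 256, 512, 512]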
1555 class FileWalkError(Exception):
1551 class FileWalkError(Exception):
1556 pass
1552 pass
1557
1553
1558 def walkfilerevs(repo, match, follow, revs, fncache):
1554 def walkfilerevs(repo, match, follow, revs, fncache):
1559 '''Walks the file history for the matched files.
1555 '''Walks the file history for the matched files.
1560
1556
1561 Returns the changeset revs that are involved in the file history.
1557 Returns the changeset revs that are involved in the file history.
1562
1558
1563 Throws FileWalkError if the file history can't be walked using
1559 Throws FileWalkError if the file history can't be walked using
1564 filelogs alone.
1560 filelogs alone.
1565 '''
1561 '''
1566 wanted = set()
1562 wanted = set()
1567 copies = []
1563 copies = []
1568 minrev, maxrev = min(revs), max(revs)
1564 minrev, maxrev = min(revs), max(revs)
1569 def filerevgen(filelog, last):
1565 def filerevgen(filelog, last):
1570 """
1566 """
1571 Only files, no patterns. Check the history of each file.
1567 Only files, no patterns. Check the history of each file.
1572
1568
1573 Examines filelog entries within minrev, maxrev linkrev range
1569 Examines filelog entries within minrev, maxrev linkrev range
1574 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1570 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
1575 tuples in backwards order
1571 tuples in backwards order
1576 """
1572 """
1577 cl_count = len(repo)
1573 cl_count = len(repo)
1578 revs = []
1574 revs = []
1579 for j in xrange(0, last + 1):
1575 for j in xrange(0, last + 1):
1580 linkrev = filelog.linkrev(j)
1576 linkrev = filelog.linkrev(j)
1581 if linkrev < minrev:
1577 if linkrev < minrev:
1582 continue
1578 continue
1583 # only yield revs for which we have the changelog; this can
1579 # only yield revs for which we have the changelog; this can
1584 # happen while doing "hg log" during a pull or commit
1580 # happen while doing "hg log" during a pull or commit
1585 if linkrev >= cl_count:
1581 if linkrev >= cl_count:
1586 break
1582 break
1587
1583
1588 parentlinkrevs = []
1584 parentlinkrevs = []
1589 for p in filelog.parentrevs(j):
1585 for p in filelog.parentrevs(j):
1590 if p != nullrev:
1586 if p != nullrev:
1591 parentlinkrevs.append(filelog.linkrev(p))
1587 parentlinkrevs.append(filelog.linkrev(p))
1592 n = filelog.node(j)
1588 n = filelog.node(j)
1593 revs.append((linkrev, parentlinkrevs,
1589 revs.append((linkrev, parentlinkrevs,
1594 follow and filelog.renamed(n)))
1590 follow and filelog.renamed(n)))
1595
1591
1596 return reversed(revs)
1592 return reversed(revs)
1597 def iterfiles():
1593 def iterfiles():
1598 pctx = repo['.']
1594 pctx = repo['.']
1599 for filename in match.files():
1595 for filename in match.files():
1600 if follow:
1596 if follow:
1601 if filename not in pctx:
1597 if filename not in pctx:
1602 raise util.Abort(_('cannot follow file not in parent '
1598 raise util.Abort(_('cannot follow file not in parent '
1603 'revision: "%s"') % filename)
1599 'revision: "%s"') % filename)
1604 yield filename, pctx[filename].filenode()
1600 yield filename, pctx[filename].filenode()
1605 else:
1601 else:
1606 yield filename, None
1602 yield filename, None
1607 for filename_node in copies:
1603 for filename_node in copies:
1608 yield filename_node
1604 yield filename_node
1609
1605
1610 for file_, node in iterfiles():
1606 for file_, node in iterfiles():
1611 filelog = repo.file(file_)
1607 filelog = repo.file(file_)
1612 if not len(filelog):
1608 if not len(filelog):
1613 if node is None:
1609 if node is None:
1614 # A zero count may be a directory or deleted file, so
1610 # A zero count may be a directory or deleted file, so
1615 # try to find matching entries on the slow path.
1611 # try to find matching entries on the slow path.
1616 if follow:
1612 if follow:
1617 raise util.Abort(
1613 raise util.Abort(
1618 _('cannot follow nonexistent file: "%s"') % file_)
1614 _('cannot follow nonexistent file: "%s"') % file_)
1619 raise FileWalkError("Cannot walk via filelog")
1615 raise FileWalkError("Cannot walk via filelog")
1620 else:
1616 else:
1621 continue
1617 continue
1622
1618
1623 if node is None:
1619 if node is None:
1624 last = len(filelog) - 1
1620 last = len(filelog) - 1
1625 else:
1621 else:
1626 last = filelog.rev(node)
1622 last = filelog.rev(node)
1627
1623
1628
1624
1629 # keep track of all ancestors of the file
1625 # keep track of all ancestors of the file
1630 ancestors = set([filelog.linkrev(last)])
1626 ancestors = set([filelog.linkrev(last)])
1631
1627
1632 # iterate from latest to oldest revision
1628 # iterate from latest to oldest revision
1633 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1629 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
1634 if not follow:
1630 if not follow:
1635 if rev > maxrev:
1631 if rev > maxrev:
1636 continue
1632 continue
1637 else:
1633 else:
1638 # Note that last might not be the first interesting
1634 # Note that last might not be the first interesting
1639 # rev to us:
1635 # rev to us:
1640 # if the file has been changed after maxrev, we'll
1636 # if the file has been changed after maxrev, we'll
1641 # have linkrev(last) > maxrev, and we still need
1637 # have linkrev(last) > maxrev, and we still need
1642 # to explore the file graph
1638 # to explore the file graph
1643 if rev not in ancestors:
1639 if rev not in ancestors:
1644 continue
1640 continue
1645 # XXX insert 1327 fix here
1641 # XXX insert 1327 fix here
1646 if flparentlinkrevs:
1642 if flparentlinkrevs:
1647 ancestors.update(flparentlinkrevs)
1643 ancestors.update(flparentlinkrevs)
1648
1644
1649 fncache.setdefault(rev, []).append(file_)
1645 fncache.setdefault(rev, []).append(file_)
1650 wanted.add(rev)
1646 wanted.add(rev)
1651 if copied:
1647 if copied:
1652 copies.append(copied)
1648 copies.append(copied)
1653
1649
1654 return wanted
1650 return wanted
1655
1651
1656 def walkchangerevs(repo, match, opts, prepare):
1652 def walkchangerevs(repo, match, opts, prepare):
1657 '''Iterate over files and the revs in which they changed.
1653 '''Iterate over files and the revs in which they changed.
1658
1654
1659 Callers most commonly need to iterate backwards over the history
1655 Callers most commonly need to iterate backwards over the history
1660 in which they are interested. Doing so has awful (quadratic-looking)
1656 in which they are interested. Doing so has awful (quadratic-looking)
1661 performance, so we use iterators in a "windowed" way.
1657 performance, so we use iterators in a "windowed" way.
1662
1658
1663 We walk a window of revisions in the desired order. Within the
1659 We walk a window of revisions in the desired order. Within the
1664 window, we first walk forwards to gather data, then in the desired
1660 window, we first walk forwards to gather data, then in the desired
1665 order (usually backwards) to display it.
1661 order (usually backwards) to display it.
1666
1662
1667 This function returns an iterator yielding contexts. Before
1663 This function returns an iterator yielding contexts. Before
1668 yielding each context, the iterator will first call the prepare
1664 yielding each context, the iterator will first call the prepare
1669 function on each context in the window in forward order.'''
1665 function on each context in the window in forward order.'''
1670
1666
1671 follow = opts.get('follow') or opts.get('follow_first')
1667 follow = opts.get('follow') or opts.get('follow_first')
1672 revs = _logrevs(repo, opts)
1668 revs = _logrevs(repo, opts)
1673 if not revs:
1669 if not revs:
1674 return []
1670 return []
1675 wanted = set()
1671 wanted = set()
1676 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1672 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1677 fncache = {}
1673 fncache = {}
1678 change = repo.changectx
1674 change = repo.changectx
1679
1675
1680 # First step is to fill wanted, the set of revisions that we want to yield.
1676 # First step is to fill wanted, the set of revisions that we want to yield.
1681 # When it does not induce extra cost, we also fill fncache for revisions in
1677 # When it does not induce extra cost, we also fill fncache for revisions in
1682 # wanted: a cache of filenames that were changed (ctx.files()) and that
1678 # wanted: a cache of filenames that were changed (ctx.files()) and that
1683 # match the file filtering conditions.
1679 # match the file filtering conditions.
1684
1680
1685 if not slowpath and not match.files():
1681 if not slowpath and not match.files():
1686 # No files, no patterns. Display all revs.
1682 # No files, no patterns. Display all revs.
1687 wanted = revs
1683 wanted = revs
1688
1684
1689 if not slowpath and match.files():
1685 if not slowpath and match.files():
1690 # We only have to read through the filelog to find wanted revisions
1686 # We only have to read through the filelog to find wanted revisions
1691
1687
1692 try:
1688 try:
1693 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1689 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1694 except FileWalkError:
1690 except FileWalkError:
1695 slowpath = True
1691 slowpath = True
1696
1692
1697 # We decided to fall back to the slowpath because at least one
1693 # We decided to fall back to the slowpath because at least one
1698 # of the paths was not a file. Check to see if at least one of them
1694 # of the paths was not a file. Check to see if at least one of them
1699 # existed in history, otherwise simply return
1695 # existed in history, otherwise simply return
1700 for path in match.files():
1696 for path in match.files():
1701 if path == '.' or path in repo.store:
1697 if path == '.' or path in repo.store:
1702 break
1698 break
1703 else:
1699 else:
1704 return []
1700 return []
1705
1701
1706 if slowpath:
1702 if slowpath:
1707 # We have to read the changelog to match filenames against
1703 # We have to read the changelog to match filenames against
1708 # changed files
1704 # changed files
1709
1705
1710 if follow:
1706 if follow:
1711 raise util.Abort(_('can only follow copies/renames for explicit '
1707 raise util.Abort(_('can only follow copies/renames for explicit '
1712 'filenames'))
1708 'filenames'))
1713
1709
1714 # The slow path checks files modified in every changeset.
1710 # The slow path checks files modified in every changeset.
1715 # This is really slow on large repos, so compute the set lazily.
1711 # This is really slow on large repos, so compute the set lazily.
1716 class lazywantedset(object):
1712 class lazywantedset(object):
1717 def __init__(self):
1713 def __init__(self):
1718 self.set = set()
1714 self.set = set()
1719 self.revs = set(revs)
1715 self.revs = set(revs)
1720
1716
1721 # No need to worry about locality here because it will be accessed
1717 # No need to worry about locality here because it will be accessed
1722 # in the same order as the increasing window below.
1718 # in the same order as the increasing window below.
1723 def __contains__(self, value):
1719 def __contains__(self, value):
1724 if value in self.set:
1720 if value in self.set:
1725 return True
1721 return True
1726 elif not value in self.revs:
1722 elif not value in self.revs:
1727 return False
1723 return False
1728 else:
1724 else:
1729 self.revs.discard(value)
1725 self.revs.discard(value)
1730 ctx = change(value)
1726 ctx = change(value)
1731 matches = filter(match, ctx.files())
1727 matches = filter(match, ctx.files())
1732 if matches:
1728 if matches:
1733 fncache[value] = matches
1729 fncache[value] = matches
1734 self.set.add(value)
1730 self.set.add(value)
1735 return True
1731 return True
1736 return False
1732 return False
1737
1733
1738 def discard(self, value):
1734 def discard(self, value):
1739 self.revs.discard(value)
1735 self.revs.discard(value)
1740 self.set.discard(value)
1736 self.set.discard(value)
1741
1737
1742 wanted = lazywantedset()
1738 wanted = lazywantedset()
1743
1739
1744 class followfilter(object):
1740 class followfilter(object):
1745 def __init__(self, onlyfirst=False):
1741 def __init__(self, onlyfirst=False):
1746 self.startrev = nullrev
1742 self.startrev = nullrev
1747 self.roots = set()
1743 self.roots = set()
1748 self.onlyfirst = onlyfirst
1744 self.onlyfirst = onlyfirst
1749
1745
1750 def match(self, rev):
1746 def match(self, rev):
1751 def realparents(rev):
1747 def realparents(rev):
1752 if self.onlyfirst:
1748 if self.onlyfirst:
1753 return repo.changelog.parentrevs(rev)[0:1]
1749 return repo.changelog.parentrevs(rev)[0:1]
1754 else:
1750 else:
1755 return filter(lambda x: x != nullrev,
1751 return filter(lambda x: x != nullrev,
1756 repo.changelog.parentrevs(rev))
1752 repo.changelog.parentrevs(rev))
1757
1753
1758 if self.startrev == nullrev:
1754 if self.startrev == nullrev:
1759 self.startrev = rev
1755 self.startrev = rev
1760 return True
1756 return True
1761
1757
1762 if rev > self.startrev:
1758 if rev > self.startrev:
1763 # forward: all descendants
1759 # forward: all descendants
1764 if not self.roots:
1760 if not self.roots:
1765 self.roots.add(self.startrev)
1761 self.roots.add(self.startrev)
1766 for parent in realparents(rev):
1762 for parent in realparents(rev):
1767 if parent in self.roots:
1763 if parent in self.roots:
1768 self.roots.add(rev)
1764 self.roots.add(rev)
1769 return True
1765 return True
1770 else:
1766 else:
1771 # backwards: all parents
1767 # backwards: all parents
1772 if not self.roots:
1768 if not self.roots:
1773 self.roots.update(realparents(self.startrev))
1769 self.roots.update(realparents(self.startrev))
1774 if rev in self.roots:
1770 if rev in self.roots:
1775 self.roots.remove(rev)
1771 self.roots.remove(rev)
1776 self.roots.update(realparents(rev))
1772 self.roots.update(realparents(rev))
1777 return True
1773 return True
1778
1774
1779 return False
1775 return False
1780
1776
1781 # it might be worthwhile to do this in the iterator if the rev range
1777 # it might be worthwhile to do this in the iterator if the rev range
1782 # is descending and the prune args are all within that range
1778 # is descending and the prune args are all within that range
1783 for rev in opts.get('prune', ()):
1779 for rev in opts.get('prune', ()):
1784 rev = repo[rev].rev()
1780 rev = repo[rev].rev()
1785 ff = followfilter()
1781 ff = followfilter()
1786 stop = min(revs[0], revs[-1])
1782 stop = min(revs[0], revs[-1])
1787 for x in xrange(rev, stop - 1, -1):
1783 for x in xrange(rev, stop - 1, -1):
1788 if ff.match(x):
1784 if ff.match(x):
1789 wanted = wanted - [x]
1785 wanted = wanted - [x]
1790
1786
1791 # Now that wanted is correctly initialized, we can iterate over the
1787 # Now that wanted is correctly initialized, we can iterate over the
1792 # revision range, yielding only revisions in wanted.
1788 # revision range, yielding only revisions in wanted.
1793 def iterate():
1789 def iterate():
1794 if follow and not match.files():
1790 if follow and not match.files():
1795 ff = followfilter(onlyfirst=opts.get('follow_first'))
1791 ff = followfilter(onlyfirst=opts.get('follow_first'))
1796 def want(rev):
1792 def want(rev):
1797 return ff.match(rev) and rev in wanted
1793 return ff.match(rev) and rev in wanted
1798 else:
1794 else:
1799 def want(rev):
1795 def want(rev):
1800 return rev in wanted
1796 return rev in wanted
1801
1797
1802 it = iter(revs)
1798 it = iter(revs)
1803 stopiteration = False
1799 stopiteration = False
1804 for windowsize in increasingwindows():
1800 for windowsize in increasingwindows():
1805 nrevs = []
1801 nrevs = []
1806 for i in xrange(windowsize):
1802 for i in xrange(windowsize):
1807 try:
1803 try:
1808 rev = it.next()
1804 rev = it.next()
1809 if want(rev):
1805 if want(rev):
1810 nrevs.append(rev)
1806 nrevs.append(rev)
1811 except (StopIteration):
1807 except (StopIteration):
1812 stopiteration = True
1808 stopiteration = True
1813 break
1809 break
1814 for rev in sorted(nrevs):
1810 for rev in sorted(nrevs):
1815 fns = fncache.get(rev)
1811 fns = fncache.get(rev)
1816 ctx = change(rev)
1812 ctx = change(rev)
1817 if not fns:
1813 if not fns:
1818 def fns_generator():
1814 def fns_generator():
1819 for f in ctx.files():
1815 for f in ctx.files():
1820 if match(f):
1816 if match(f):
1821 yield f
1817 yield f
1822 fns = fns_generator()
1818 fns = fns_generator()
1823 prepare(ctx, fns)
1819 prepare(ctx, fns)
1824 for rev in nrevs:
1820 for rev in nrevs:
1825 yield change(rev)
1821 yield change(rev)
1826
1822
1827 if stopiteration:
1823 if stopiteration:
1828 break
1824 break
1829
1825
1830 return iterate()
1826 return iterate()
1831
1827
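# Editor's sketch (hypothetical helper): the caller contract of
# walkchangerevs(), mirroring how finddate() above uses it -- prepare() runs
# on every ctx of a window in forward order before the contexts are yielded.
def _demo_walkchangerevs(ui, repo):
    m = scmutil.matchall(repo)
    def prep(ctx, fns):
        ui.note("prepared %d\n" % ctx.rev())
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        ui.write("%d:%s\n" % (ctx.rev(), short(ctx.node())))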
1832 def _makefollowlogfilematcher(repo, files, followfirst):
1828 def _makefollowlogfilematcher(repo, files, followfirst):
1833 # When displaying a revision with --patch --follow FILE, we have
1829 # When displaying a revision with --patch --follow FILE, we have
1834 # to know which file of the revision must be diffed. With
1830 # to know which file of the revision must be diffed. With
1835 # --follow, we want the names of the ancestors of FILE in the
1831 # --follow, we want the names of the ancestors of FILE in the
1836 # revision, stored in "fcache". "fcache" is populated by
1832 # revision, stored in "fcache". "fcache" is populated by
1837 # reproducing the graph traversal already done by --follow revset
1833 # reproducing the graph traversal already done by --follow revset
1838 # and relating linkrevs to file names (which is not "correct" but
1834 # and relating linkrevs to file names (which is not "correct" but
1839 # good enough).
1835 # good enough).
1840 fcache = {}
1836 fcache = {}
1841 fcacheready = [False]
1837 fcacheready = [False]
1842 pctx = repo['.']
1838 pctx = repo['.']
1843
1839
1844 def populate():
1840 def populate():
1845 for fn in files:
1841 for fn in files:
1846 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1842 for i in ((pctx[fn],), pctx[fn].ancestors(followfirst=followfirst)):
1847 for c in i:
1843 for c in i:
1848 fcache.setdefault(c.linkrev(), set()).add(c.path())
1844 fcache.setdefault(c.linkrev(), set()).add(c.path())
1849
1845
1850 def filematcher(rev):
1846 def filematcher(rev):
1851 if not fcacheready[0]:
1847 if not fcacheready[0]:
1852 # Lazy initialization
1848 # Lazy initialization
1853 fcacheready[0] = True
1849 fcacheready[0] = True
1854 populate()
1850 populate()
1855 return scmutil.matchfiles(repo, fcache.get(rev, []))
1851 return scmutil.matchfiles(repo, fcache.get(rev, []))
1856
1852
1857 return filematcher
1853 return filematcher
1858
1854
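# Editor's sketch (illustrative only): the matcher factory above is lazy; its
# fcache is populated on the first call.  'README' is a hypothetical file that
# is assumed to exist in the working directory parent.
def _demo_followmatcher(repo, rev):
    matchfn = _makefollowlogfilematcher(repo, ['README'], followfirst=False)
    m = matchfn(rev)   # matches the names README (or its sources) had at rev
    return m('README')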
1859 def _makenofollowlogfilematcher(repo, pats, opts):
1855 def _makenofollowlogfilematcher(repo, pats, opts):
1860 '''hook for extensions to override the filematcher for non-follow cases'''
1856 '''hook for extensions to override the filematcher for non-follow cases'''
1861 return None
1857 return None
1862
1858
1863 def _makelogrevset(repo, pats, opts, revs):
1859 def _makelogrevset(repo, pats, opts, revs):
1864 """Return (expr, filematcher) where expr is a revset string built
1860 """Return (expr, filematcher) where expr is a revset string built
1865 from log options and file patterns or None. If --stat or --patch
1861 from log options and file patterns or None. If --stat or --patch
1866 are not passed filematcher is None. Otherwise it is a callable
1862 are not passed filematcher is None. Otherwise it is a callable
1867 taking a revision number and returning a match object filtering
1863 taking a revision number and returning a match object filtering
1868 the files to be detailed when displaying the revision.
1864 the files to be detailed when displaying the revision.
1869 """
1865 """
1870 opt2revset = {
1866 opt2revset = {
1871 'no_merges': ('not merge()', None),
1867 'no_merges': ('not merge()', None),
1872 'only_merges': ('merge()', None),
1868 'only_merges': ('merge()', None),
1873 '_ancestors': ('ancestors(%(val)s)', None),
1869 '_ancestors': ('ancestors(%(val)s)', None),
1874 '_fancestors': ('_firstancestors(%(val)s)', None),
1870 '_fancestors': ('_firstancestors(%(val)s)', None),
1875 '_descendants': ('descendants(%(val)s)', None),
1871 '_descendants': ('descendants(%(val)s)', None),
1876 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1872 '_fdescendants': ('_firstdescendants(%(val)s)', None),
1877 '_matchfiles': ('_matchfiles(%(val)s)', None),
1873 '_matchfiles': ('_matchfiles(%(val)s)', None),
1878 'date': ('date(%(val)r)', None),
1874 'date': ('date(%(val)r)', None),
1879 'branch': ('branch(%(val)r)', ' or '),
1875 'branch': ('branch(%(val)r)', ' or '),
1880 '_patslog': ('filelog(%(val)r)', ' or '),
1876 '_patslog': ('filelog(%(val)r)', ' or '),
1881 '_patsfollow': ('follow(%(val)r)', ' or '),
1877 '_patsfollow': ('follow(%(val)r)', ' or '),
1882 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1878 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
1883 'keyword': ('keyword(%(val)r)', ' or '),
1879 'keyword': ('keyword(%(val)r)', ' or '),
1884 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1880 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
1885 'user': ('user(%(val)r)', ' or '),
1881 'user': ('user(%(val)r)', ' or '),
1886 }
1882 }
1887
1883
1888 opts = dict(opts)
1884 opts = dict(opts)
1889 # follow or not follow?
1885 # follow or not follow?
1890 follow = opts.get('follow') or opts.get('follow_first')
1886 follow = opts.get('follow') or opts.get('follow_first')
1891 if opts.get('follow_first'):
1887 if opts.get('follow_first'):
1892 followfirst = 1
1888 followfirst = 1
1893 else:
1889 else:
1894 followfirst = 0
1890 followfirst = 0
1895 # --follow with FILE behaviour depends on revs...
1891 # --follow with FILE behaviour depends on revs...
1896 it = iter(revs)
1892 it = iter(revs)
1897 startrev = it.next()
1893 startrev = it.next()
1898 try:
1894 try:
1899 followdescendants = startrev < it.next()
1895 followdescendants = startrev < it.next()
1900 except (StopIteration):
1896 except (StopIteration):
1901 followdescendants = False
1897 followdescendants = False
1902
1898
1903 # branch and only_branch are really aliases and must be handled at
1899 # branch and only_branch are really aliases and must be handled at
1904 # the same time
1900 # the same time
1905 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1901 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
1906 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1902 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
1907 # pats/include/exclude are passed to match.match() directly in
1903 # pats/include/exclude are passed to match.match() directly in
1908 # _matchfiles() revset but walkchangerevs() builds its matcher with
1904 # _matchfiles() revset but walkchangerevs() builds its matcher with
1909 # scmutil.match(). The difference is input pats are globbed on
1905 # scmutil.match(). The difference is input pats are globbed on
1910 # platforms without shell expansion (windows).
1906 # platforms without shell expansion (windows).
1911 pctx = repo[None]
1907 pctx = repo[None]
1912 match, pats = scmutil.matchandpats(pctx, pats, opts)
1908 match, pats = scmutil.matchandpats(pctx, pats, opts)
1913 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1909 slowpath = match.anypats() or (match.files() and opts.get('removed'))
1914 if not slowpath:
1910 if not slowpath:
1915 for f in match.files():
1911 for f in match.files():
1916 if follow and f not in pctx:
1912 if follow and f not in pctx:
1917 # If the file exists, it may be a directory, so let it
1913 # If the file exists, it may be a directory, so let it
1918 # take the slow path.
1914 # take the slow path.
1919 if os.path.exists(repo.wjoin(f)):
1915 if os.path.exists(repo.wjoin(f)):
1920 slowpath = True
1916 slowpath = True
1921 continue
1917 continue
1922 else:
1918 else:
1923 raise util.Abort(_('cannot follow file not in parent '
1919 raise util.Abort(_('cannot follow file not in parent '
1924 'revision: "%s"') % f)
1920 'revision: "%s"') % f)
1925 filelog = repo.file(f)
1921 filelog = repo.file(f)
1926 if not filelog:
1922 if not filelog:
1927 # A zero count may be a directory or deleted file, so
1923 # A zero count may be a directory or deleted file, so
1928 # try to find matching entries on the slow path.
1924 # try to find matching entries on the slow path.
1929 if follow:
1925 if follow:
1930 raise util.Abort(
1926 raise util.Abort(
1931 _('cannot follow nonexistent file: "%s"') % f)
1927 _('cannot follow nonexistent file: "%s"') % f)
1932 slowpath = True
1928 slowpath = True
1933
1929
1934 # We decided to fall back to the slowpath because at least one
1930 # We decided to fall back to the slowpath because at least one
1935 # of the paths was not a file. Check to see if at least one of them
1931 # of the paths was not a file. Check to see if at least one of them
1936 # existed in history - in that case, we'll continue down the
1932 # existed in history - in that case, we'll continue down the
1937 # slowpath; otherwise, we can turn off the slowpath
1933 # slowpath; otherwise, we can turn off the slowpath
1938 if slowpath:
1934 if slowpath:
1939 for path in match.files():
1935 for path in match.files():
1940 if path == '.' or path in repo.store:
1936 if path == '.' or path in repo.store:
1941 break
1937 break
1942 else:
1938 else:
1943 slowpath = False
1939 slowpath = False
1944
1940
1945 fpats = ('_patsfollow', '_patsfollowfirst')
1941 fpats = ('_patsfollow', '_patsfollowfirst')
1946 fnopats = (('_ancestors', '_fancestors'),
1942 fnopats = (('_ancestors', '_fancestors'),
1947 ('_descendants', '_fdescendants'))
1943 ('_descendants', '_fdescendants'))
1948 if slowpath:
1944 if slowpath:
1949 # See walkchangerevs() slow path.
1945 # See walkchangerevs() slow path.
1950 #
1946 #
1951 # pats/include/exclude cannot be represented as separate
1947 # pats/include/exclude cannot be represented as separate
1952 # revset expressions as their filtering logic applies at file
1948 # revset expressions as their filtering logic applies at file
1953 # level. For instance "-I a -X a" matches a revision touching
1949 # level. For instance "-I a -X a" matches a revision touching
1954 # "a" and "b" while "file(a) and not file(b)" does
1950 # "a" and "b" while "file(a) and not file(b)" does
1955 # not. Besides, filesets are evaluated against the working
1951 # not. Besides, filesets are evaluated against the working
1956 # directory.
1952 # directory.
1957 matchargs = ['r:', 'd:relpath']
1953 matchargs = ['r:', 'd:relpath']
1958 for p in pats:
1954 for p in pats:
1959 matchargs.append('p:' + p)
1955 matchargs.append('p:' + p)
1960 for p in opts.get('include', []):
1956 for p in opts.get('include', []):
1961 matchargs.append('i:' + p)
1957 matchargs.append('i:' + p)
1962 for p in opts.get('exclude', []):
1958 for p in opts.get('exclude', []):
1963 matchargs.append('x:' + p)
1959 matchargs.append('x:' + p)
1964 matchargs = ','.join(('%r' % p) for p in matchargs)
1960 matchargs = ','.join(('%r' % p) for p in matchargs)
1965 opts['_matchfiles'] = matchargs
1961 opts['_matchfiles'] = matchargs
1966 if follow:
1962 if follow:
1967 opts[fnopats[0][followfirst]] = '.'
1963 opts[fnopats[0][followfirst]] = '.'
1968 else:
1964 else:
1969 if follow:
1965 if follow:
1970 if pats:
1966 if pats:
1971 # follow() revset interprets its file argument as a
1967 # follow() revset interprets its file argument as a
1972 # manifest entry, so use match.files(), not pats.
1968 # manifest entry, so use match.files(), not pats.
1973 opts[fpats[followfirst]] = list(match.files())
1969 opts[fpats[followfirst]] = list(match.files())
1974 else:
1970 else:
1975 op = fnopats[followdescendants][followfirst]
1971 op = fnopats[followdescendants][followfirst]
1976 opts[op] = 'rev(%d)' % startrev
1972 opts[op] = 'rev(%d)' % startrev
1977 else:
1973 else:
1978 opts['_patslog'] = list(pats)
1974 opts['_patslog'] = list(pats)
1979
1975
1980 filematcher = None
1976 filematcher = None
1981 if opts.get('patch') or opts.get('stat'):
1977 if opts.get('patch') or opts.get('stat'):
1982 # When following files, track renames via a special matcher.
1978 # When following files, track renames via a special matcher.
1983 # If we're forced to take the slowpath it means we're following
1979 # If we're forced to take the slowpath it means we're following
1984 # at least one pattern/directory, so don't bother with rename tracking.
1980 # at least one pattern/directory, so don't bother with rename tracking.
1985 if follow and not match.always() and not slowpath:
1981 if follow and not match.always() and not slowpath:
1986 # _makefollowlogfilematcher expects its files argument to be
1982 # _makefollowlogfilematcher expects its files argument to be
1987 # relative to the repo root, so use match.files(), not pats.
1983 # relative to the repo root, so use match.files(), not pats.
1988 filematcher = _makefollowlogfilematcher(repo, match.files(),
1984 filematcher = _makefollowlogfilematcher(repo, match.files(),
1989 followfirst)
1985 followfirst)
1990 else:
1986 else:
1991 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
1987 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
1992 if filematcher is None:
1988 if filematcher is None:
1993 filematcher = lambda rev: match
1989 filematcher = lambda rev: match
1994
1990
1995 expr = []
1991 expr = []
1996 for op, val in sorted(opts.iteritems()):
1992 for op, val in sorted(opts.iteritems()):
1997 if not val:
1993 if not val:
1998 continue
1994 continue
1999 if op not in opt2revset:
1995 if op not in opt2revset:
2000 continue
1996 continue
2001 revop, andor = opt2revset[op]
1997 revop, andor = opt2revset[op]
2002 if '%(val)' not in revop:
1998 if '%(val)' not in revop:
2003 expr.append(revop)
1999 expr.append(revop)
2004 else:
2000 else:
2005 if not isinstance(val, list):
2001 if not isinstance(val, list):
2006 e = revop % {'val': val}
2002 e = revop % {'val': val}
2007 else:
2003 else:
2008 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2004 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2009 expr.append(e)
2005 expr.append(e)
2010
2006
2011 if expr:
2007 if expr:
2012 expr = '(' + ' and '.join(expr) + ')'
2008 expr = '(' + ' and '.join(expr) + ')'
2013 else:
2009 else:
2014 expr = None
2010 expr = None
2015 return expr, filematcher
2011 return expr, filematcher
2016
2012
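To make the opt2revset table above concrete, here is a small self-contained sketch of how the loop at the end of _makelogrevset joins the per-option fragments into a single revset string; `buildexpr` and the sample options are illustrative, not real cmdutil.py code.

OPT2REVSET = {
    'no_merges': ('not merge()', None),
    'keyword': ('keyword(%(val)r)', ' or '),
    'user': ('user(%(val)r)', ' or '),
}

def buildexpr(opts):
    # Combine per-option revset fragments with 'and', mirroring the loop
    # at the end of _makelogrevset above.
    expr = []
    for op, val in sorted(opts.items()):
        if not val or op not in OPT2REVSET:
            continue
        revop, andor = OPT2REVSET[op]
        if '%(val)' not in revop:
            expr.append(revop)
        elif isinstance(val, list):
            expr.append('(' + andor.join(revop % {'val': v} for v in val) + ')')
        else:
            expr.append(revop % {'val': val})
    return '(' + ' and '.join(expr) + ')' if expr else None

# roughly what "hg log -k bug -k fix --no-merges" would produce
assert buildexpr({'keyword': ['bug', 'fix'], 'no_merges': True}) == \
    "((keyword('bug') or keyword('fix')) and not merge())"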
2017 def _logrevs(repo, opts):
2013 def _logrevs(repo, opts):
2018 # Default --rev value depends on --follow but --follow behaviour
2014 # Default --rev value depends on --follow but --follow behaviour
2019 # depends on revisions resolved from --rev...
2015 # depends on revisions resolved from --rev...
2020 follow = opts.get('follow') or opts.get('follow_first')
2016 follow = opts.get('follow') or opts.get('follow_first')
2021 if opts.get('rev'):
2017 if opts.get('rev'):
2022 revs = scmutil.revrange(repo, opts['rev'])
2018 revs = scmutil.revrange(repo, opts['rev'])
2023 elif follow and repo.dirstate.p1() == nullid:
2019 elif follow and repo.dirstate.p1() == nullid:
2024 revs = revset.baseset()
2020 revs = revset.baseset()
2025 elif follow:
2021 elif follow:
2026 revs = repo.revs('reverse(:.)')
2022 revs = repo.revs('reverse(:.)')
2027 else:
2023 else:
2028 revs = revset.spanset(repo)
2024 revs = revset.spanset(repo)
2029 revs.reverse()
2025 revs.reverse()
2030 return revs
2026 return revs
2031
2027
2032 def getgraphlogrevs(repo, pats, opts):
2028 def getgraphlogrevs(repo, pats, opts):
2033 """Return (revs, expr, filematcher) where revs is an iterable of
2029 """Return (revs, expr, filematcher) where revs is an iterable of
2034 revision numbers, expr is a revset string built from log options
2030 revision numbers, expr is a revset string built from log options
2035 and file patterns or None, and used to filter 'revs'. If --stat or
2031 and file patterns or None, and used to filter 'revs'. If --stat or
2036 --patch are not passed filematcher is None. Otherwise it is a
2032 --patch are not passed filematcher is None. Otherwise it is a
2037 callable taking a revision number and returning a match object
2033 callable taking a revision number and returning a match object
2038 filtering the files to be detailed when displaying the revision.
2034 filtering the files to be detailed when displaying the revision.
2039 """
2035 """
2040 limit = loglimit(opts)
2036 limit = loglimit(opts)
2041 revs = _logrevs(repo, opts)
2037 revs = _logrevs(repo, opts)
2042 if not revs:
2038 if not revs:
2043 return revset.baseset(), None, None
2039 return revset.baseset(), None, None
2044 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2040 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2045 if opts.get('rev'):
2041 if opts.get('rev'):
2046 # User-specified revs might be unsorted, but don't sort before
2042 # User-specified revs might be unsorted, but don't sort before
2047 # _makelogrevset because it might depend on the order of revs
2043 # _makelogrevset because it might depend on the order of revs
2048 revs.sort(reverse=True)
2044 revs.sort(reverse=True)
2049 if expr:
2045 if expr:
2050 # Revset matchers often operate faster on revisions in changelog
2046 # Revset matchers often operate faster on revisions in changelog
2051 # order, because most filters deal with the changelog.
2047 # order, because most filters deal with the changelog.
2052 revs.reverse()
2048 revs.reverse()
2053 matcher = revset.match(repo.ui, expr)
2049 matcher = revset.match(repo.ui, expr)
2054 # Revset matches can reorder revisions. "A or B" typically returns
2050 # Revset matches can reorder revisions. "A or B" typically returns
2055 # the revision matching A then the revision matching B. Sort
2051 # the revision matching A then the revision matching B. Sort
2056 # again to fix that.
2052 # again to fix that.
2057 revs = matcher(repo, revs)
2053 revs = matcher(repo, revs)
2058 revs.sort(reverse=True)
2054 revs.sort(reverse=True)
2059 if limit is not None:
2055 if limit is not None:
2060 limitedrevs = []
2056 limitedrevs = []
2061 for idx, rev in enumerate(revs):
2057 for idx, rev in enumerate(revs):
2062 if idx >= limit:
2058 if idx >= limit:
2063 break
2059 break
2064 limitedrevs.append(rev)
2060 limitedrevs.append(rev)
2065 revs = revset.baseset(limitedrevs)
2061 revs = revset.baseset(limitedrevs)
2066
2062
2067 return revs, expr, filematcher
2063 return revs, expr, filematcher
2068
2064
2069 def getlogrevs(repo, pats, opts):
2065 def getlogrevs(repo, pats, opts):
2070 """Return (revs, expr, filematcher) where revs is an iterable of
2066 """Return (revs, expr, filematcher) where revs is an iterable of
2071 revision numbers, expr is a revset string built from log options
2067 revision numbers, expr is a revset string built from log options
2072 and file patterns or None, and used to filter 'revs'. If --stat or
2068 and file patterns or None, and used to filter 'revs'. If --stat or
2073 --patch are not passed filematcher is None. Otherwise it is a
2069 --patch are not passed filematcher is None. Otherwise it is a
2074 callable taking a revision number and returning a match object
2070 callable taking a revision number and returning a match object
2075 filtering the files to be detailed when displaying the revision.
2071 filtering the files to be detailed when displaying the revision.
2076 """
2072 """
2077 limit = loglimit(opts)
2073 limit = loglimit(opts)
2078 revs = _logrevs(repo, opts)
2074 revs = _logrevs(repo, opts)
2079 if not revs:
2075 if not revs:
2080 return revset.baseset([]), None, None
2076 return revset.baseset([]), None, None
2081 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2077 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2082 if expr:
2078 if expr:
2083 # Revset matchers often operate faster on revisions in changelog
2079 # Revset matchers often operate faster on revisions in changelog
2084 # order, because most filters deal with the changelog.
2080 # order, because most filters deal with the changelog.
2085 if not opts.get('rev'):
2081 if not opts.get('rev'):
2086 revs.reverse()
2082 revs.reverse()
2087 matcher = revset.match(repo.ui, expr)
2083 matcher = revset.match(repo.ui, expr)
2088 # Revset matches can reorder revisions. "A or B" typically returns
2084 # Revset matches can reorder revisions. "A or B" typically returns
2089 # the revision matching A then the revision matching B. Sort
2085 # the revision matching A then the revision matching B. Sort
2090 # again to fix that.
2086 # again to fix that.
2091 revs = matcher(repo, revs)
2087 revs = matcher(repo, revs)
2092 if not opts.get('rev'):
2088 if not opts.get('rev'):
2093 revs.sort(reverse=True)
2089 revs.sort(reverse=True)
2094 if limit is not None:
2090 if limit is not None:
2095 count = 0
2091 count = 0
2096 limitedrevs = []
2092 limitedrevs = []
2097 it = iter(revs)
2093 it = iter(revs)
2098 while count < limit:
2094 while count < limit:
2099 try:
2095 try:
2100 limitedrevs.append(it.next())
2096 limitedrevs.append(it.next())
2101 except (StopIteration):
2097 except (StopIteration):
2102 break
2098 break
2103 count += 1
2099 count += 1
2104 revs = revset.baseset(limitedrevs)
2100 revs = revset.baseset(limitedrevs)
2105
2101
2106 return revs, expr, filematcher
2102 return revs, expr, filematcher
2107
2103
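getgraphlogrevs and getlogrevs both truncate the revision iterator by hand when --limit is given. The sketch below shows the same truncation with itertools.islice for comparison; `limitrevs` is a hypothetical helper over plain Python values, whereas the real code wraps the result in revset.baseset().

from itertools import islice

def limitrevs(revs, limit):
    # Return at most `limit` items from `revs`; None means no limit.
    if limit is None:
        return list(revs)
    return list(islice(revs, limit))

assert limitrevs(iter([7, 6, 5, 4]), 2) == [7, 6]
assert limitrevs(iter([7, 6]), None) == [7, 6]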
2108 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
2104 def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
2109 filematcher=None):
2105 filematcher=None):
2110 seen, state = [], graphmod.asciistate()
2106 seen, state = [], graphmod.asciistate()
2111 for rev, type, ctx, parents in dag:
2107 for rev, type, ctx, parents in dag:
2112 char = 'o'
2108 char = 'o'
2113 if ctx.node() in showparents:
2109 if ctx.node() in showparents:
2114 char = '@'
2110 char = '@'
2115 elif ctx.obsolete():
2111 elif ctx.obsolete():
2116 char = 'x'
2112 char = 'x'
2117 elif ctx.closesbranch():
2113 elif ctx.closesbranch():
2118 char = '_'
2114 char = '_'
2119 copies = None
2115 copies = None
2120 if getrenamed and ctx.rev():
2116 if getrenamed and ctx.rev():
2121 copies = []
2117 copies = []
2122 for fn in ctx.files():
2118 for fn in ctx.files():
2123 rename = getrenamed(fn, ctx.rev())
2119 rename = getrenamed(fn, ctx.rev())
2124 if rename:
2120 if rename:
2125 copies.append((fn, rename[0]))
2121 copies.append((fn, rename[0]))
2126 revmatchfn = None
2122 revmatchfn = None
2127 if filematcher is not None:
2123 if filematcher is not None:
2128 revmatchfn = filematcher(ctx.rev())
2124 revmatchfn = filematcher(ctx.rev())
2129 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2125 displayer.show(ctx, copies=copies, matchfn=revmatchfn)
2130 lines = displayer.hunk.pop(rev).split('\n')
2126 lines = displayer.hunk.pop(rev).split('\n')
2131 if not lines[-1]:
2127 if not lines[-1]:
2132 del lines[-1]
2128 del lines[-1]
2133 displayer.flush(rev)
2129 displayer.flush(rev)
2134 edges = edgefn(type, char, lines, seen, rev, parents)
2130 edges = edgefn(type, char, lines, seen, rev, parents)
2135 for type, char, lines, coldata in edges:
2131 for type, char, lines, coldata in edges:
2136 graphmod.ascii(ui, state, type, char, lines, coldata)
2132 graphmod.ascii(ui, state, type, char, lines, coldata)
2137 displayer.close()
2133 displayer.close()
2138
2134
2139 def graphlog(ui, repo, *pats, **opts):
2135 def graphlog(ui, repo, *pats, **opts):
2140 # Parameters are identical to log command ones
2136 # Parameters are identical to log command ones
2141 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2137 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2142 revdag = graphmod.dagwalker(repo, revs)
2138 revdag = graphmod.dagwalker(repo, revs)
2143
2139
2144 getrenamed = None
2140 getrenamed = None
2145 if opts.get('copies'):
2141 if opts.get('copies'):
2146 endrev = None
2142 endrev = None
2147 if opts.get('rev'):
2143 if opts.get('rev'):
2148 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2144 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2149 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2145 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2150 displayer = show_changeset(ui, repo, opts, buffered=True)
2146 displayer = show_changeset(ui, repo, opts, buffered=True)
2151 showparents = [ctx.node() for ctx in repo[None].parents()]
2147 showparents = [ctx.node() for ctx in repo[None].parents()]
2152 displaygraph(ui, revdag, displayer, showparents,
2148 displaygraph(ui, revdag, displayer, showparents,
2153 graphmod.asciiedges, getrenamed, filematcher)
2149 graphmod.asciiedges, getrenamed, filematcher)
2154
2150
2155 def checkunsupportedgraphflags(pats, opts):
2151 def checkunsupportedgraphflags(pats, opts):
2156 for op in ["newest_first"]:
2152 for op in ["newest_first"]:
2157 if op in opts and opts[op]:
2153 if op in opts and opts[op]:
2158 raise util.Abort(_("-G/--graph option is incompatible with --%s")
2154 raise util.Abort(_("-G/--graph option is incompatible with --%s")
2159 % op.replace("_", "-"))
2155 % op.replace("_", "-"))
2160
2156
2161 def graphrevs(repo, nodes, opts):
2157 def graphrevs(repo, nodes, opts):
2162 limit = loglimit(opts)
2158 limit = loglimit(opts)
2163 nodes.reverse()
2159 nodes.reverse()
2164 if limit is not None:
2160 if limit is not None:
2165 nodes = nodes[:limit]
2161 nodes = nodes[:limit]
2166 return graphmod.nodes(repo, nodes)
2162 return graphmod.nodes(repo, nodes)
2167
2163
2168 def add(ui, repo, match, prefix, explicitonly, **opts):
2164 def add(ui, repo, match, prefix, explicitonly, **opts):
2169 join = lambda f: os.path.join(prefix, f)
2165 join = lambda f: os.path.join(prefix, f)
2170 bad = []
2166 bad = []
2171 oldbad = match.bad
2167 oldbad = match.bad
2172 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2168 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2173 names = []
2169 names = []
2174 wctx = repo[None]
2170 wctx = repo[None]
2175 cca = None
2171 cca = None
2176 abort, warn = scmutil.checkportabilityalert(ui)
2172 abort, warn = scmutil.checkportabilityalert(ui)
2177 if abort or warn:
2173 if abort or warn:
2178 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2174 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2179 for f in wctx.walk(match):
2175 for f in wctx.walk(match):
2180 exact = match.exact(f)
2176 exact = match.exact(f)
2181 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2177 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2182 if cca:
2178 if cca:
2183 cca(f)
2179 cca(f)
2184 names.append(f)
2180 names.append(f)
2185 if ui.verbose or not exact:
2181 if ui.verbose or not exact:
2186 ui.status(_('adding %s\n') % match.rel(f))
2182 ui.status(_('adding %s\n') % match.rel(f))
2187
2183
2188 for subpath in sorted(wctx.substate):
2184 for subpath in sorted(wctx.substate):
2189 sub = wctx.sub(subpath)
2185 sub = wctx.sub(subpath)
2190 try:
2186 try:
2191 submatch = matchmod.narrowmatcher(subpath, match)
2187 submatch = matchmod.narrowmatcher(subpath, match)
2192 if opts.get('subrepos'):
2188 if opts.get('subrepos'):
2193 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2189 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2194 else:
2190 else:
2195 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2191 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2196 except error.LookupError:
2192 except error.LookupError:
2197 ui.status(_("skipping missing subrepository: %s\n")
2193 ui.status(_("skipping missing subrepository: %s\n")
2198 % join(subpath))
2194 % join(subpath))
2199
2195
2200 if not opts.get('dry_run'):
2196 if not opts.get('dry_run'):
2201 rejected = wctx.add(names, prefix)
2197 rejected = wctx.add(names, prefix)
2202 bad.extend(f for f in rejected if f in match.files())
2198 bad.extend(f for f in rejected if f in match.files())
2203 return bad
2199 return bad
2204
2200
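add() above (and forget() and cat() further down) intercepts the matcher's bad() callback with `bad.append(x) or oldbad(x, y)`: list.append() returns None, so the `or` always falls through to the original callback. Here is a standalone sketch of that idiom with made-up names (`interceptbad`, `fakematcher`), not taken from cmdutil.py.

def interceptbad(matcher, collected):
    # Wrap matcher.bad so every path it reports is also recorded in
    # `collected`, while the original callback still runs.
    oldbad = matcher.bad
    matcher.bad = lambda path, msg: collected.append(path) or oldbad(path, msg)

class fakematcher(object):
    def bad(self, path, msg):
        print('skipping %s: %s' % (path, msg))

m = fakematcher()
bad = []
interceptbad(m, bad)
m.bad('missing.txt', 'no such file in working directory')
assert bad == ['missing.txt']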
2205 def forget(ui, repo, match, prefix, explicitonly):
2201 def forget(ui, repo, match, prefix, explicitonly):
2206 join = lambda f: os.path.join(prefix, f)
2202 join = lambda f: os.path.join(prefix, f)
2207 bad = []
2203 bad = []
2208 oldbad = match.bad
2204 oldbad = match.bad
2209 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2205 match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
2210 wctx = repo[None]
2206 wctx = repo[None]
2211 forgot = []
2207 forgot = []
2212 s = repo.status(match=match, clean=True)
2208 s = repo.status(match=match, clean=True)
2213 forget = sorted(s[0] + s[1] + s[3] + s[6])
2209 forget = sorted(s[0] + s[1] + s[3] + s[6])
2214 if explicitonly:
2210 if explicitonly:
2215 forget = [f for f in forget if match.exact(f)]
2211 forget = [f for f in forget if match.exact(f)]
2216
2212
2217 for subpath in sorted(wctx.substate):
2213 for subpath in sorted(wctx.substate):
2218 sub = wctx.sub(subpath)
2214 sub = wctx.sub(subpath)
2219 try:
2215 try:
2220 submatch = matchmod.narrowmatcher(subpath, match)
2216 submatch = matchmod.narrowmatcher(subpath, match)
2221 subbad, subforgot = sub.forget(submatch, prefix)
2217 subbad, subforgot = sub.forget(submatch, prefix)
2222 bad.extend([subpath + '/' + f for f in subbad])
2218 bad.extend([subpath + '/' + f for f in subbad])
2223 forgot.extend([subpath + '/' + f for f in subforgot])
2219 forgot.extend([subpath + '/' + f for f in subforgot])
2224 except error.LookupError:
2220 except error.LookupError:
2225 ui.status(_("skipping missing subrepository: %s\n")
2221 ui.status(_("skipping missing subrepository: %s\n")
2226 % join(subpath))
2222 % join(subpath))
2227
2223
2228 if not explicitonly:
2224 if not explicitonly:
2229 for f in match.files():
2225 for f in match.files():
2230 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2226 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2231 if f not in forgot:
2227 if f not in forgot:
2232 if repo.wvfs.exists(f):
2228 if repo.wvfs.exists(f):
2233 ui.warn(_('not removing %s: '
2229 ui.warn(_('not removing %s: '
2234 'file is already untracked\n')
2230 'file is already untracked\n')
2235 % match.rel(f))
2231 % match.rel(f))
2236 bad.append(f)
2232 bad.append(f)
2237
2233
2238 for f in forget:
2234 for f in forget:
2239 if ui.verbose or not match.exact(f):
2235 if ui.verbose or not match.exact(f):
2240 ui.status(_('removing %s\n') % match.rel(f))
2236 ui.status(_('removing %s\n') % match.rel(f))
2241
2237
2242 rejected = wctx.forget(forget, prefix)
2238 rejected = wctx.forget(forget, prefix)
2243 bad.extend(f for f in rejected if f in match.files())
2239 bad.extend(f for f in rejected if f in match.files())
2244 forgot.extend(f for f in forget if f not in rejected)
2240 forgot.extend(f for f in forget if f not in rejected)
2245 return bad, forgot
2241 return bad, forgot
2246
2242
2247 def files(ui, ctx, m, fm, fmt):
2243 def files(ui, ctx, m, fm, fmt):
2248 rev = ctx.rev()
2244 rev = ctx.rev()
2249 ret = 1
2245 ret = 1
2250 ds = ctx.repo().dirstate
2246 ds = ctx.repo().dirstate
2251
2247
2252 for f in ctx.matches(m):
2248 for f in ctx.matches(m):
2253 if rev is None and ds[f] == 'r':
2249 if rev is None and ds[f] == 'r':
2254 continue
2250 continue
2255 fm.startitem()
2251 fm.startitem()
2256 if ui.verbose:
2252 if ui.verbose:
2257 fc = ctx[f]
2253 fc = ctx[f]
2258 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2254 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2259 fm.data(abspath=f)
2255 fm.data(abspath=f)
2260 fm.write('path', fmt, m.rel(f))
2256 fm.write('path', fmt, m.rel(f))
2261 ret = 0
2257 ret = 0
2262
2258
2263 return ret
2259 return ret
2264
2260
2265 def remove(ui, repo, m, prefix, after, force, subrepos):
2261 def remove(ui, repo, m, prefix, after, force, subrepos):
2266 join = lambda f: os.path.join(prefix, f)
2262 join = lambda f: os.path.join(prefix, f)
2267 ret = 0
2263 ret = 0
2268 s = repo.status(match=m, clean=True)
2264 s = repo.status(match=m, clean=True)
2269 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2265 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2270
2266
2271 wctx = repo[None]
2267 wctx = repo[None]
2272
2268
2273 for subpath in sorted(wctx.substate):
2269 for subpath in sorted(wctx.substate):
2274 def matchessubrepo(matcher, subpath):
2270 def matchessubrepo(matcher, subpath):
2275 if matcher.exact(subpath):
2271 if matcher.exact(subpath):
2276 return True
2272 return True
2277 for f in matcher.files():
2273 for f in matcher.files():
2278 if f.startswith(subpath):
2274 if f.startswith(subpath):
2279 return True
2275 return True
2280 return False
2276 return False
2281
2277
2282 if subrepos or matchessubrepo(m, subpath):
2278 if subrepos or matchessubrepo(m, subpath):
2283 sub = wctx.sub(subpath)
2279 sub = wctx.sub(subpath)
2284 try:
2280 try:
2285 submatch = matchmod.narrowmatcher(subpath, m)
2281 submatch = matchmod.narrowmatcher(subpath, m)
2286 if sub.removefiles(submatch, prefix, after, force, subrepos):
2282 if sub.removefiles(submatch, prefix, after, force, subrepos):
2287 ret = 1
2283 ret = 1
2288 except error.LookupError:
2284 except error.LookupError:
2289 ui.status(_("skipping missing subrepository: %s\n")
2285 ui.status(_("skipping missing subrepository: %s\n")
2290 % join(subpath))
2286 % join(subpath))
2291
2287
2292 # warn about failure to delete explicit files/dirs
2288 # warn about failure to delete explicit files/dirs
2293 deleteddirs = scmutil.dirs(deleted)
2289 deleteddirs = scmutil.dirs(deleted)
2294 for f in m.files():
2290 for f in m.files():
2295 def insubrepo():
2291 def insubrepo():
2296 for subpath in wctx.substate:
2292 for subpath in wctx.substate:
2297 if f.startswith(subpath):
2293 if f.startswith(subpath):
2298 return True
2294 return True
2299 return False
2295 return False
2300
2296
2301 isdir = f in deleteddirs or f in wctx.dirs()
2297 isdir = f in deleteddirs or f in wctx.dirs()
2302 if f in repo.dirstate or isdir or f == '.' or insubrepo():
2298 if f in repo.dirstate or isdir or f == '.' or insubrepo():
2303 continue
2299 continue
2304
2300
2305 if repo.wvfs.exists(f):
2301 if repo.wvfs.exists(f):
2306 if repo.wvfs.isdir(f):
2302 if repo.wvfs.isdir(f):
2307 ui.warn(_('not removing %s: no tracked files\n')
2303 ui.warn(_('not removing %s: no tracked files\n')
2308 % m.rel(f))
2304 % m.rel(f))
2309 else:
2305 else:
2310 ui.warn(_('not removing %s: file is untracked\n')
2306 ui.warn(_('not removing %s: file is untracked\n')
2311 % m.rel(f))
2307 % m.rel(f))
2312 # missing files will generate a warning elsewhere
2308 # missing files will generate a warning elsewhere
2313 ret = 1
2309 ret = 1
2314
2310
2315 if force:
2311 if force:
2316 list = modified + deleted + clean + added
2312 list = modified + deleted + clean + added
2317 elif after:
2313 elif after:
2318 list = deleted
2314 list = deleted
2319 for f in modified + added + clean:
2315 for f in modified + added + clean:
2320 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
2316 ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
2321 ret = 1
2317 ret = 1
2322 else:
2318 else:
2323 list = deleted + clean
2319 list = deleted + clean
2324 for f in modified:
2320 for f in modified:
2325 ui.warn(_('not removing %s: file is modified (use -f'
2321 ui.warn(_('not removing %s: file is modified (use -f'
2326 ' to force removal)\n') % m.rel(f))
2322 ' to force removal)\n') % m.rel(f))
2327 ret = 1
2323 ret = 1
2328 for f in added:
2324 for f in added:
2329 ui.warn(_('not removing %s: file has been marked for add'
2325 ui.warn(_('not removing %s: file has been marked for add'
2330 ' (use forget to undo)\n') % m.rel(f))
2326 ' (use forget to undo)\n') % m.rel(f))
2331 ret = 1
2327 ret = 1
2332
2328
2333 for f in sorted(list):
2329 for f in sorted(list):
2334 if ui.verbose or not m.exact(f):
2330 if ui.verbose or not m.exact(f):
2335 ui.status(_('removing %s\n') % m.rel(f))
2331 ui.status(_('removing %s\n') % m.rel(f))
2336
2332
2337 wlock = repo.wlock()
2333 wlock = repo.wlock()
2338 try:
2334 try:
2339 if not after:
2335 if not after:
2340 for f in list:
2336 for f in list:
2341 if f in added:
2337 if f in added:
2342 continue # we never unlink added files on remove
2338 continue # we never unlink added files on remove
2343 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2339 util.unlinkpath(repo.wjoin(f), ignoremissing=True)
2344 repo[None].forget(list)
2340 repo[None].forget(list)
2345 finally:
2341 finally:
2346 wlock.release()
2342 wlock.release()
2347
2343
2348 return ret
2344 return ret
2349
2345
2350 def cat(ui, repo, ctx, matcher, prefix, **opts):
2346 def cat(ui, repo, ctx, matcher, prefix, **opts):
2351 err = 1
2347 err = 1
2352
2348
2353 def write(path):
2349 def write(path):
2354 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2350 fp = makefileobj(repo, opts.get('output'), ctx.node(),
2355 pathname=os.path.join(prefix, path))
2351 pathname=os.path.join(prefix, path))
2356 data = ctx[path].data()
2352 data = ctx[path].data()
2357 if opts.get('decode'):
2353 if opts.get('decode'):
2358 data = repo.wwritedata(path, data)
2354 data = repo.wwritedata(path, data)
2359 fp.write(data)
2355 fp.write(data)
2360 fp.close()
2356 fp.close()
2361
2357
2362 # Automation often uses hg cat on single files, so special case it
2358 # Automation often uses hg cat on single files, so special case it
2363 # for performance to avoid the cost of parsing the manifest.
2359 # for performance to avoid the cost of parsing the manifest.
2364 if len(matcher.files()) == 1 and not matcher.anypats():
2360 if len(matcher.files()) == 1 and not matcher.anypats():
2365 file = matcher.files()[0]
2361 file = matcher.files()[0]
2366 mf = repo.manifest
2362 mf = repo.manifest
2367 mfnode = ctx._changeset[0]
2363 mfnode = ctx._changeset[0]
2368 if mf.find(mfnode, file)[0]:
2364 if mf.find(mfnode, file)[0]:
2369 write(file)
2365 write(file)
2370 return 0
2366 return 0
2371
2367
2372 # Don't warn about "missing" files that are really in subrepos
2368 # Don't warn about "missing" files that are really in subrepos
2373 bad = matcher.bad
2369 bad = matcher.bad
2374
2370
2375 def badfn(path, msg):
2371 def badfn(path, msg):
2376 for subpath in ctx.substate:
2372 for subpath in ctx.substate:
2377 if path.startswith(subpath):
2373 if path.startswith(subpath):
2378 return
2374 return
2379 bad(path, msg)
2375 bad(path, msg)
2380
2376
2381 matcher.bad = badfn
2377 matcher.bad = badfn
2382
2378
2383 for abs in ctx.walk(matcher):
2379 for abs in ctx.walk(matcher):
2384 write(abs)
2380 write(abs)
2385 err = 0
2381 err = 0
2386
2382
2387 matcher.bad = bad
2383 matcher.bad = bad
2388
2384
2389 for subpath in sorted(ctx.substate):
2385 for subpath in sorted(ctx.substate):
2390 sub = ctx.sub(subpath)
2386 sub = ctx.sub(subpath)
2391 try:
2387 try:
2392 submatch = matchmod.narrowmatcher(subpath, matcher)
2388 submatch = matchmod.narrowmatcher(subpath, matcher)
2393
2389
2394 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2390 if not sub.cat(submatch, os.path.join(prefix, sub._path),
2395 **opts):
2391 **opts):
2396 err = 0
2392 err = 0
2397 except error.RepoLookupError:
2393 except error.RepoLookupError:
2398 ui.status(_("skipping missing subrepository: %s\n")
2394 ui.status(_("skipping missing subrepository: %s\n")
2399 % os.path.join(prefix, subpath))
2395 % os.path.join(prefix, subpath))
2400
2396
2401 return err
2397 return err
2402
2398
2403 def commit(ui, repo, commitfunc, pats, opts):
2399 def commit(ui, repo, commitfunc, pats, opts):
2404 '''commit the specified files or all outstanding changes'''
2400 '''commit the specified files or all outstanding changes'''
2405 date = opts.get('date')
2401 date = opts.get('date')
2406 if date:
2402 if date:
2407 opts['date'] = util.parsedate(date)
2403 opts['date'] = util.parsedate(date)
2408 message = logmessage(ui, opts)
2404 message = logmessage(ui, opts)
2409 matcher = scmutil.match(repo[None], pats, opts)
2405 matcher = scmutil.match(repo[None], pats, opts)
2410
2406
2411 # extract addremove carefully -- this function can be called from a command
2407 # extract addremove carefully -- this function can be called from a command
2412 # that doesn't support addremove
2408 # that doesn't support addremove
2413 if opts.get('addremove'):
2409 if opts.get('addremove'):
2414 if scmutil.addremove(repo, matcher, "", opts) != 0:
2410 if scmutil.addremove(repo, matcher, "", opts) != 0:
2415 raise util.Abort(
2411 raise util.Abort(
2416 _("failed to mark all new/missing files as added/removed"))
2412 _("failed to mark all new/missing files as added/removed"))
2417
2413
2418 return commitfunc(ui, repo, message, matcher, opts)
2414 return commitfunc(ui, repo, message, matcher, opts)
2419
2415
2420 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2416 def amend(ui, repo, commitfunc, old, extra, pats, opts):
2421 # amend will reuse the existing user if not specified, but the obsolete
2417 # amend will reuse the existing user if not specified, but the obsolete
2422 # marker creation requires that the current user's name is specified.
2418 # marker creation requires that the current user's name is specified.
2423 if obsolete._enabled:
2419 if obsolete._enabled:
2424 ui.username() # raise exception if username not set
2420 ui.username() # raise exception if username not set
2425
2421
2426 ui.note(_('amending changeset %s\n') % old)
2422 ui.note(_('amending changeset %s\n') % old)
2427 base = old.p1()
2423 base = old.p1()
2428
2424
2429 wlock = lock = newid = None
2425 wlock = lock = newid = None
2430 try:
2426 try:
2431 wlock = repo.wlock()
2427 wlock = repo.wlock()
2432 lock = repo.lock()
2428 lock = repo.lock()
2433 tr = repo.transaction('amend')
2429 tr = repo.transaction('amend')
2434 try:
2430 try:
2435 # See if we got a message from -m or -l, if not, open the editor
2431 # See if we got a message from -m or -l, if not, open the editor
2436 # with the message of the changeset to amend
2432 # with the message of the changeset to amend
2437 message = logmessage(ui, opts)
2433 message = logmessage(ui, opts)
2438 # ensure logfile does not conflict with later enforcement of the
2434 # ensure logfile does not conflict with later enforcement of the
2439 # message. potential logfile content has been processed by
2435 # message. potential logfile content has been processed by
2440 # `logmessage` anyway.
2436 # `logmessage` anyway.
2441 opts.pop('logfile')
2437 opts.pop('logfile')
2442 # First, do a regular commit to record all changes in the working
2438 # First, do a regular commit to record all changes in the working
2443 # directory (if there are any)
2439 # directory (if there are any)
2444 ui.callhooks = False
2440 ui.callhooks = False
2445 currentbookmark = repo._bookmarkcurrent
2441 currentbookmark = repo._bookmarkcurrent
2446 try:
2442 try:
2447 repo._bookmarkcurrent = None
2443 repo._bookmarkcurrent = None
2448 opts['message'] = 'temporary amend commit for %s' % old
2444 opts['message'] = 'temporary amend commit for %s' % old
2449 node = commit(ui, repo, commitfunc, pats, opts)
2445 node = commit(ui, repo, commitfunc, pats, opts)
2450 finally:
2446 finally:
2451 repo._bookmarkcurrent = currentbookmark
2447 repo._bookmarkcurrent = currentbookmark
2452 ui.callhooks = True
2448 ui.callhooks = True
2453 ctx = repo[node]
2449 ctx = repo[node]
2454
2450
2455 # Participating changesets:
2451 # Participating changesets:
2456 #
2452 #
2457 # node/ctx o - new (intermediate) commit that contains changes
2453 # node/ctx o - new (intermediate) commit that contains changes
2458 # | from working dir to go into amending commit
2454 # | from working dir to go into amending commit
2459 # | (or a workingctx if there were no changes)
2455 # | (or a workingctx if there were no changes)
2460 # |
2456 # |
2461 # old o - changeset to amend
2457 # old o - changeset to amend
2462 # |
2458 # |
2463 # base o - parent of amending changeset
2459 # base o - parent of amending changeset
2464
2460
2465 # Update extra dict from amended commit (e.g. to preserve graft
2461 # Update extra dict from amended commit (e.g. to preserve graft
2466 # source)
2462 # source)
2467 extra.update(old.extra())
2463 extra.update(old.extra())
2468
2464
2469 # Also update it from the intermediate commit or from the wctx
2465 # Also update it from the intermediate commit or from the wctx
2470 extra.update(ctx.extra())
2466 extra.update(ctx.extra())
2471
2467
2472 if len(old.parents()) > 1:
2468 if len(old.parents()) > 1:
2473 # ctx.files() isn't reliable for merges, so fall back to the
2469 # ctx.files() isn't reliable for merges, so fall back to the
2474 # slower repo.status() method
2470 # slower repo.status() method
2475 files = set([fn for st in repo.status(base, old)[:3]
2471 files = set([fn for st in repo.status(base, old)[:3]
2476 for fn in st])
2472 for fn in st])
2477 else:
2473 else:
2478 files = set(old.files())
2474 files = set(old.files())
2479
2475
2480 # Second, we use either the commit we just did, or if there were no
2476 # Second, we use either the commit we just did, or if there were no
2481 # changes, the parent of the working directory as the version of the
2477 # changes, the parent of the working directory as the version of the
2482 # files in the final amend commit
2478 # files in the final amend commit
2483 if node:
2479 if node:
2484 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2480 ui.note(_('copying changeset %s to %s\n') % (ctx, base))
2485
2481
2486 user = ctx.user()
2482 user = ctx.user()
2487 date = ctx.date()
2483 date = ctx.date()
2488 # Recompute copies (avoid recording a -> b -> a)
2484 # Recompute copies (avoid recording a -> b -> a)
2489 copied = copies.pathcopies(base, ctx)
2485 copied = copies.pathcopies(base, ctx)
2490 if old.p2:
2486 if old.p2:
2491 copied.update(copies.pathcopies(old.p2(), ctx))
2487 copied.update(copies.pathcopies(old.p2(), ctx))
2492
2488
2493 # Prune files which were reverted by the updates: if old
2489 # Prune files which were reverted by the updates: if old
2494 # introduced file X and our intermediate commit, node,
2490 # introduced file X and our intermediate commit, node,
2495 # renamed that file, then those two files are the same and
2491 # renamed that file, then those two files are the same and
2496 # we can discard X from our list of files. Likewise if X
2492 # we can discard X from our list of files. Likewise if X
2497 # was deleted, it's no longer relevant
2493 # was deleted, it's no longer relevant
2498 files.update(ctx.files())
2494 files.update(ctx.files())
2499
2495
2500 def samefile(f):
2496 def samefile(f):
2501 if f in ctx.manifest():
2497 if f in ctx.manifest():
2502 a = ctx.filectx(f)
2498 a = ctx.filectx(f)
2503 if f in base.manifest():
2499 if f in base.manifest():
2504 b = base.filectx(f)
2500 b = base.filectx(f)
2505 return (not a.cmp(b)
2501 return (not a.cmp(b)
2506 and a.flags() == b.flags())
2502 and a.flags() == b.flags())
2507 else:
2503 else:
2508 return False
2504 return False
2509 else:
2505 else:
2510 return f not in base.manifest()
2506 return f not in base.manifest()
2511 files = [f for f in files if not samefile(f)]
2507 files = [f for f in files if not samefile(f)]
2512
2508
2513 def filectxfn(repo, ctx_, path):
2509 def filectxfn(repo, ctx_, path):
2514 try:
2510 try:
2515 fctx = ctx[path]
2511 fctx = ctx[path]
2516 flags = fctx.flags()
2512 flags = fctx.flags()
2517 mctx = context.memfilectx(repo,
2513 mctx = context.memfilectx(repo,
2518 fctx.path(), fctx.data(),
2514 fctx.path(), fctx.data(),
2519 islink='l' in flags,
2515 islink='l' in flags,
2520 isexec='x' in flags,
2516 isexec='x' in flags,
2521 copied=copied.get(path))
2517 copied=copied.get(path))
2522 return mctx
2518 return mctx
2523 except KeyError:
2519 except KeyError:
2524 return None
2520 return None
2525 else:
2521 else:
2526 ui.note(_('copying changeset %s to %s\n') % (old, base))
2522 ui.note(_('copying changeset %s to %s\n') % (old, base))
2527
2523
2528 # Use version of files as in the old cset
2524 # Use version of files as in the old cset
2529 def filectxfn(repo, ctx_, path):
2525 def filectxfn(repo, ctx_, path):
2530 try:
2526 try:
2531 return old.filectx(path)
2527 return old.filectx(path)
2532 except KeyError:
2528 except KeyError:
2533 return None
2529 return None
2534
2530
2535 user = opts.get('user') or old.user()
2531 user = opts.get('user') or old.user()
2536 date = opts.get('date') or old.date()
2532 date = opts.get('date') or old.date()
2537 editform = mergeeditform(old, 'commit.amend')
2533 editform = mergeeditform(old, 'commit.amend')
2538 editor = getcommiteditor(editform=editform, **opts)
2534 editor = getcommiteditor(editform=editform, **opts)
2539 if not message:
2535 if not message:
2540 editor = getcommiteditor(edit=True, editform=editform)
2536 editor = getcommiteditor(edit=True, editform=editform)
2541 message = old.description()
2537 message = old.description()
2542
2538
2543 pureextra = extra.copy()
2539 pureextra = extra.copy()
2544 extra['amend_source'] = old.hex()
2540 extra['amend_source'] = old.hex()
2545
2541
2546 new = context.memctx(repo,
2542 new = context.memctx(repo,
2547 parents=[base.node(), old.p2().node()],
2543 parents=[base.node(), old.p2().node()],
2548 text=message,
2544 text=message,
2549 files=files,
2545 files=files,
2550 filectxfn=filectxfn,
2546 filectxfn=filectxfn,
2551 user=user,
2547 user=user,
2552 date=date,
2548 date=date,
2553 extra=extra,
2549 extra=extra,
2554 editor=editor)
2550 editor=editor)
2555
2551
2556 newdesc = changelog.stripdesc(new.description())
2552 newdesc = changelog.stripdesc(new.description())
2557 if ((not node)
2553 if ((not node)
2558 and newdesc == old.description()
2554 and newdesc == old.description()
2559 and user == old.user()
2555 and user == old.user()
2560 and date == old.date()
2556 and date == old.date()
2561 and pureextra == old.extra()):
2557 and pureextra == old.extra()):
2562 # nothing changed. continuing here would create a new node
2558 # nothing changed. continuing here would create a new node
2563 # anyway because of the amend_source noise.
2559 # anyway because of the amend_source noise.
2564 #
2560 #
2565 # This is not what we expect from amend.
2561 # This is not what we expect from amend.
2566 return old.node()
2562 return old.node()
2567
2563
2568 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2564 ph = repo.ui.config('phases', 'new-commit', phases.draft)
2569 try:
2565 try:
2570 if opts.get('secret'):
2566 if opts.get('secret'):
2571 commitphase = 'secret'
2567 commitphase = 'secret'
2572 else:
2568 else:
2573 commitphase = old.phase()
2569 commitphase = old.phase()
2574 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2570 repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
2575 newid = repo.commitctx(new)
2571 newid = repo.commitctx(new)
2576 finally:
2572 finally:
2577 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2573 repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
2578 if newid != old.node():
2574 if newid != old.node():
2579 # Reroute the working copy parent to the new changeset
2575 # Reroute the working copy parent to the new changeset
2580 repo.setparents(newid, nullid)
2576 repo.setparents(newid, nullid)
2581
2577
2582 # Move bookmarks from old parent to amend commit
2578 # Move bookmarks from old parent to amend commit
2583 bms = repo.nodebookmarks(old.node())
2579 bms = repo.nodebookmarks(old.node())
2584 if bms:
2580 if bms:
2585 marks = repo._bookmarks
2581 marks = repo._bookmarks
2586 for bm in bms:
2582 for bm in bms:
2587 marks[bm] = newid
2583 marks[bm] = newid
2588 marks.write()
2584 marks.write()
2589 # commit the whole amend process
2585 # commit the whole amend process
2590 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2586 createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
2591 if createmarkers and newid != old.node():
2587 if createmarkers and newid != old.node():
2592 # mark the new changeset as successor of the rewritten one
2588 # mark the new changeset as successor of the rewritten one
2593 new = repo[newid]
2589 new = repo[newid]
2594 obs = [(old, (new,))]
2590 obs = [(old, (new,))]
2595 if node:
2591 if node:
2596 obs.append((ctx, ()))
2592 obs.append((ctx, ()))
2597
2593
2598 obsolete.createmarkers(repo, obs)
2594 obsolete.createmarkers(repo, obs)
2599 tr.close()
2595 tr.close()
2600 finally:
2596 finally:
2601 tr.release()
2597 tr.release()
2602 if not createmarkers and newid != old.node():
2598 if not createmarkers and newid != old.node():
2603 # Strip the intermediate commit (if there was one) and the amended
2599 # Strip the intermediate commit (if there was one) and the amended
2604 # commit
2600 # commit
2605 if node:
2601 if node:
2606 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2602 ui.note(_('stripping intermediate changeset %s\n') % ctx)
2607 ui.note(_('stripping amended changeset %s\n') % old)
2603 ui.note(_('stripping amended changeset %s\n') % old)
2608 repair.strip(ui, repo, old.node(), topic='amend-backup')
2604 repair.strip(ui, repo, old.node(), topic='amend-backup')
2609 finally:
2605 finally:
2610 if newid is None:
2606 if newid is None:
2611 repo.dirstate.invalidate()
2607 repo.dirstate.invalidate()
2612 lockmod.release(lock, wlock)
2608 lockmod.release(lock, wlock)
2613 return newid
2609 return newid
2614
2610
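The pruning step inside amend() drops files whose content and flags are unchanged between the intermediate commit and the amend base. A rough, dict-based sketch of that comparison follows; the toy "manifests" and the `prunefiles` helper are illustrative only and gloss over the real filectx.cmp() call.

def prunefiles(files, ctxmanifest, basemanifest):
    # Manifests are modelled as {path: (data, flags)} dicts; a file is
    # dropped when it is identical on both sides, or missing from both,
    # mirroring samefile() above.
    def samefile(f):
        if f in ctxmanifest:
            return basemanifest.get(f) == ctxmanifest[f]
        return f not in basemanifest
    return [f for f in files if not samefile(f)]

base = {'a': ('old a', ''), 'b': ('b', '')}
ctx = {'a': ('new a', ''), 'b': ('b', '')}   # 'a' edited, 'b' reverted
assert prunefiles(['a', 'b', 'c'], ctx, base) == ['a']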
2615 def commiteditor(repo, ctx, subs, editform=''):
2611 def commiteditor(repo, ctx, subs, editform=''):
2616 if ctx.description():
2612 if ctx.description():
2617 return ctx.description()
2613 return ctx.description()
2618 return commitforceeditor(repo, ctx, subs, editform=editform)
2614 return commitforceeditor(repo, ctx, subs, editform=editform)
2619
2615
2620 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2616 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2621 editform=''):
2617 editform=''):
2622 if not extramsg:
2618 if not extramsg:
2623 extramsg = _("Leave message empty to abort commit.")
2619 extramsg = _("Leave message empty to abort commit.")
2624
2620
2625 forms = [e for e in editform.split('.') if e]
2621 forms = [e for e in editform.split('.') if e]
2626 forms.insert(0, 'changeset')
2622 forms.insert(0, 'changeset')
2627 while forms:
2623 while forms:
2628 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2624 tmpl = repo.ui.config('committemplate', '.'.join(forms))
2629 if tmpl:
2625 if tmpl:
2630 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2626 committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
2631 break
2627 break
2632 forms.pop()
2628 forms.pop()
2633 else:
2629 else:
2634 committext = buildcommittext(repo, ctx, subs, extramsg)
2630 committext = buildcommittext(repo, ctx, subs, extramsg)
2635
2631
2636 # run editor in the repository root
2632 # run editor in the repository root
2637 olddir = os.getcwd()
2633 olddir = os.getcwd()
2638 os.chdir(repo.root)
2634 os.chdir(repo.root)
2639 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2635 text = repo.ui.edit(committext, ctx.user(), ctx.extra(), editform=editform)
2640 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2636 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2641 os.chdir(olddir)
2637 os.chdir(olddir)
2642
2638
2643 if finishdesc:
2639 if finishdesc:
2644 text = finishdesc(text)
2640 text = finishdesc(text)
2645 if not text.strip():
2641 if not text.strip():
2646 raise util.Abort(_("empty commit message"))
2642 raise util.Abort(_("empty commit message"))
2647
2643
2648 return text
2644 return text
2649
2645
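commitforceeditor() above resolves [committemplate] keys from the most specific editform (for example 'changeset.commit.amend') down to plain 'changeset'. Below is a small self-contained sketch of that most-specific-first lookup, using an ordinary dict in place of the ui config; the key names are examples only.

def lookuptemplate(config, editform):
    # Walk from the most specific [committemplate] key down to the generic
    # 'changeset' entry, as commitforceeditor() does.
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = config.get('.'.join(forms))
        if tmpl:
            return tmpl
        forms.pop()
    return None   # the real code falls back to buildcommittext() here

config = {'changeset.commit': 'commit text...', 'changeset': 'generic text...'}
assert lookuptemplate(config, 'commit.amend') == 'commit text...'
assert lookuptemplate(config, 'gpg.sign') == 'generic text...'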
2650 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2646 def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
2651 ui = repo.ui
2647 ui = repo.ui
2652 tmpl, mapfile = gettemplate(ui, tmpl, None)
2648 tmpl, mapfile = gettemplate(ui, tmpl, None)
2653
2649
2654 try:
2650 try:
2655 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2651 t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
2656 except SyntaxError, inst:
2652 except SyntaxError, inst:
2657 raise util.Abort(inst.args[0])
2653 raise util.Abort(inst.args[0])
2658
2654
2659 for k, v in repo.ui.configitems('committemplate'):
2655 for k, v in repo.ui.configitems('committemplate'):
2660 if k != 'changeset':
2656 if k != 'changeset':
2661 t.t.cache[k] = v
2657 t.t.cache[k] = v
2662
2658
2663 if not extramsg:
2659 if not extramsg:
2664 extramsg = '' # ensure that extramsg is string
2660 extramsg = '' # ensure that extramsg is string
2665
2661
2666 ui.pushbuffer()
2662 ui.pushbuffer()
2667 t.show(ctx, extramsg=extramsg)
2663 t.show(ctx, extramsg=extramsg)
2668 return ui.popbuffer()
2664 return ui.popbuffer()
2669
2665
2670 def buildcommittext(repo, ctx, subs, extramsg):
2666 def buildcommittext(repo, ctx, subs, extramsg):
2671 edittext = []
2667 edittext = []
2672 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2668 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2673 if ctx.description():
2669 if ctx.description():
2674 edittext.append(ctx.description())
2670 edittext.append(ctx.description())
2675 edittext.append("")
2671 edittext.append("")
2676 edittext.append("") # Empty line between message and comments.
2672 edittext.append("") # Empty line between message and comments.
2677 edittext.append(_("HG: Enter commit message."
2673 edittext.append(_("HG: Enter commit message."
2678 " Lines beginning with 'HG:' are removed."))
2674 " Lines beginning with 'HG:' are removed."))
2679 edittext.append("HG: %s" % extramsg)
2675 edittext.append("HG: %s" % extramsg)
2680 edittext.append("HG: --")
2676 edittext.append("HG: --")
2681 edittext.append(_("HG: user: %s") % ctx.user())
2677 edittext.append(_("HG: user: %s") % ctx.user())
2682 if ctx.p2():
2678 if ctx.p2():
2683 edittext.append(_("HG: branch merge"))
2679 edittext.append(_("HG: branch merge"))
2684 if ctx.branch():
2680 if ctx.branch():
2685 edittext.append(_("HG: branch '%s'") % ctx.branch())
2681 edittext.append(_("HG: branch '%s'") % ctx.branch())
2686 if bookmarks.iscurrent(repo):
2682 if bookmarks.iscurrent(repo):
2687 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2683 edittext.append(_("HG: bookmark '%s'") % repo._bookmarkcurrent)
2688 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2684 edittext.extend([_("HG: subrepo %s") % s for s in subs])
2689 edittext.extend([_("HG: added %s") % f for f in added])
2685 edittext.extend([_("HG: added %s") % f for f in added])
2690 edittext.extend([_("HG: changed %s") % f for f in modified])
2686 edittext.extend([_("HG: changed %s") % f for f in modified])
2691 edittext.extend([_("HG: removed %s") % f for f in removed])
2687 edittext.extend([_("HG: removed %s") % f for f in removed])
2692 if not added and not modified and not removed:
2688 if not added and not modified and not removed:
2693 edittext.append(_("HG: no files changed"))
2689 edittext.append(_("HG: no files changed"))
2694 edittext.append("")
2690 edittext.append("")
2695
2691
2696 return "\n".join(edittext)
2692 return "\n".join(edittext)
2697
2693
2698 def commitstatus(repo, node, branch, bheads=None, opts={}):
2694 def commitstatus(repo, node, branch, bheads=None, opts={}):
2699 ctx = repo[node]
2695 ctx = repo[node]
2700 parents = ctx.parents()
2696 parents = ctx.parents()
2701
2697
2702 if (not opts.get('amend') and bheads and node not in bheads and not
2698 if (not opts.get('amend') and bheads and node not in bheads and not
2703 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2699 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2704 repo.ui.status(_('created new head\n'))
2700 repo.ui.status(_('created new head\n'))
2705 # The message is not printed for initial roots. For the other
2701 # The message is not printed for initial roots. For the other
2706 # changesets, it is printed in the following situations:
2702 # changesets, it is printed in the following situations:
2707 #
2703 #
2708 # Par column: for the 2 parents with ...
2704 # Par column: for the 2 parents with ...
2709 # N: null or no parent
2705 # N: null or no parent
2710 # B: parent is on another named branch
2706 # B: parent is on another named branch
2711 # C: parent is a regular non head changeset
2707 # C: parent is a regular non head changeset
2712 # H: parent was a branch head of the current branch
2708 # H: parent was a branch head of the current branch
2713 # Msg column: whether we print "created new head" message
2709 # Msg column: whether we print "created new head" message
2714 # In the following, it is assumed that there already exists some
2710 # In the following, it is assumed that there already exists some
2715 # initial branch heads of the current branch, otherwise nothing is
2711 # initial branch heads of the current branch, otherwise nothing is
2716 # printed anyway.
2712 # printed anyway.
2717 #
2713 #
2718 # Par Msg Comment
2714 # Par Msg Comment
2719 # N N y additional topo root
2715 # N N y additional topo root
2720 #
2716 #
2721 # B N y additional branch root
2717 # B N y additional branch root
2722 # C N y additional topo head
2718 # C N y additional topo head
2723 # H N n usual case
2719 # H N n usual case
2724 #
2720 #
2725 # B B y weird additional branch root
2721 # B B y weird additional branch root
2726 # C B y branch merge
2722 # C B y branch merge
2727 # H B n merge with named branch
2723 # H B n merge with named branch
2728 #
2724 #
2729 # C C y additional head from merge
2725 # C C y additional head from merge
2730 # C H n merge with a head
2726 # C H n merge with a head
2731 #
2727 #
2732 # H H n head merge: head count decreases
2728 # H H n head merge: head count decreases
2733
2729
2734 if not opts.get('close_branch'):
2730 if not opts.get('close_branch'):
2735 for r in parents:
2731 for r in parents:
2736 if r.closesbranch() and r.branch() == branch:
2732 if r.closesbranch() and r.branch() == branch:
2737 repo.ui.status(_('reopening closed branch head %d\n') % r)
2733 repo.ui.status(_('reopening closed branch head %d\n') % r)
2738
2734
2739 if repo.ui.debugflag:
2735 if repo.ui.debugflag:
2740 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2736 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
2741 elif repo.ui.verbose:
2737 elif repo.ui.verbose:
2742 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2738 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2743
2739
2744 def revert(ui, repo, ctx, parents, *pats, **opts):
2740 def revert(ui, repo, ctx, parents, *pats, **opts):
2745 parent, p2 = parents
2741 parent, p2 = parents
2746 node = ctx.node()
2742 node = ctx.node()
2747
2743
2748 mf = ctx.manifest()
2744 mf = ctx.manifest()
2749 if node == p2:
2745 if node == p2:
2750 parent = p2
2746 parent = p2
2751 if node == parent:
2747 if node == parent:
2752 pmf = mf
2748 pmf = mf
2753 else:
2749 else:
2754 pmf = None
2750 pmf = None
2755
2751
2756 # need all matching names in dirstate and manifest of target rev,
2752 # need all matching names in dirstate and manifest of target rev,
2757 # so have to walk both. do not print errors if files exist in one
2753 # so have to walk both. do not print errors if files exist in one
2758 # but not the other.
2754 # but not the other.
2759
2755
2760 # `names` is a mapping for all elements in working copy and target revision
2756 # `names` is a mapping for all elements in working copy and target revision
2761 # The mapping is in the form:
2757 # The mapping is in the form:
2762 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2758 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2763 names = {}
2759 names = {}
2764
2760
2765 wlock = repo.wlock()
2761 wlock = repo.wlock()
2766 try:
2762 try:
2767 ## filling of the `names` mapping
2763 ## filling of the `names` mapping
2768 # walk dirstate to fill `names`
2764 # walk dirstate to fill `names`
2769
2765
2770 m = scmutil.match(repo[None], pats, opts)
2766 m = scmutil.match(repo[None], pats, opts)
2771 if not m.always() or node != parent:
2767 if not m.always() or node != parent:
2772 m.bad = lambda x, y: False
2768 m.bad = lambda x, y: False
2773 for abs in repo.walk(m):
2769 for abs in repo.walk(m):
2774 names[abs] = m.rel(abs), m.exact(abs)
2770 names[abs] = m.rel(abs), m.exact(abs)
2775
2771
2776 # walk target manifest to fill `names`
2772 # walk target manifest to fill `names`
2777
2773
2778 def badfn(path, msg):
2774 def badfn(path, msg):
2779 if path in names:
2775 if path in names:
2780 return
2776 return
2781 if path in ctx.substate:
2777 if path in ctx.substate:
2782 return
2778 return
2783 path_ = path + '/'
2779 path_ = path + '/'
2784 for f in names:
2780 for f in names:
2785 if f.startswith(path_):
2781 if f.startswith(path_):
2786 return
2782 return
2787 ui.warn("%s: %s\n" % (m.rel(path), msg))
2783 ui.warn("%s: %s\n" % (m.rel(path), msg))
2788
2784
2789 m = scmutil.match(ctx, pats, opts)
2785 m = scmutil.match(ctx, pats, opts)
2790 m.bad = badfn
2786 m.bad = badfn
2791 for abs in ctx.walk(m):
2787 for abs in ctx.walk(m):
2792 if abs not in names:
2788 if abs not in names:
2793 names[abs] = m.rel(abs), m.exact(abs)
2789 names[abs] = m.rel(abs), m.exact(abs)
2794
2790
2795 # Find the status of all files in `names`.
2791 # Find the status of all files in `names`.
2796 m = scmutil.matchfiles(repo, names)
2792 m = scmutil.matchfiles(repo, names)
2797
2793
2798 changes = repo.status(node1=node, match=m,
2794 changes = repo.status(node1=node, match=m,
2799 unknown=True, ignored=True, clean=True)
2795 unknown=True, ignored=True, clean=True)
2800 else:
2796 else:
2801 changes = repo.status(match=m)
2797 changes = repo.status(match=m)
2802 for kind in changes:
2798 for kind in changes:
2803 for abs in kind:
2799 for abs in kind:
2804 names[abs] = m.rel(abs), m.exact(abs)
2800 names[abs] = m.rel(abs), m.exact(abs)
2805
2801
2806 m = scmutil.matchfiles(repo, names)
2802 m = scmutil.matchfiles(repo, names)
2807
2803
2808 modified = set(changes.modified)
2804 modified = set(changes.modified)
2809 added = set(changes.added)
2805 added = set(changes.added)
2810 removed = set(changes.removed)
2806 removed = set(changes.removed)
2811 _deleted = set(changes.deleted)
2807 _deleted = set(changes.deleted)
2812 unknown = set(changes.unknown)
2808 unknown = set(changes.unknown)
2813 unknown.update(changes.ignored)
2809 unknown.update(changes.ignored)
2814 clean = set(changes.clean)
2810 clean = set(changes.clean)
2815 modadded = set()
2811 modadded = set()
2816
2812
2817 # split between files known in target manifest and the others
2813 # split between files known in target manifest and the others
2818 smf = set(mf)
2814 smf = set(mf)
2819
2815
2820 # determine the exact nature of the deleted files
2816 # determine the exact nature of the deleted files
2821 deladded = _deleted - smf
2817 deladded = _deleted - smf
2822 deleted = _deleted - deladded
2818 deleted = _deleted - deladded
2823
2819
2824 # We need to account for the state of the file in the dirstate,
2820 # We need to account for the state of the file in the dirstate,
2825 # even when we revert against something other than the parent. This will
2821 # even when we revert against something other than the parent. This will
2826 # slightly alter the behavior of revert (doing a backup or not, delete
2822 # slightly alter the behavior of revert (doing a backup or not, delete
2827 # or just forget, etc.).
2823 # or just forget, etc.).
2828 if parent == node:
2824 if parent == node:
2829 dsmodified = modified
2825 dsmodified = modified
2830 dsadded = added
2826 dsadded = added
2831 dsremoved = removed
2827 dsremoved = removed
2832 # store all local modifications, useful later for rename detection
2828 # store all local modifications, useful later for rename detection
2833 localchanges = dsmodified | dsadded
2829 localchanges = dsmodified | dsadded
2834 modified, added, removed = set(), set(), set()
2830 modified, added, removed = set(), set(), set()
2835 else:
2831 else:
2836 changes = repo.status(node1=parent, match=m)
2832 changes = repo.status(node1=parent, match=m)
2837 dsmodified = set(changes.modified)
2833 dsmodified = set(changes.modified)
2838 dsadded = set(changes.added)
2834 dsadded = set(changes.added)
2839 dsremoved = set(changes.removed)
2835 dsremoved = set(changes.removed)
2840 # store all local modifications, useful later for rename detection
2836 # store all local modifications, useful later for rename detection
2841 localchanges = dsmodified | dsadded
2837 localchanges = dsmodified | dsadded
2842
2838
2843 # only take into account removes between wc and target
2839 # only take into account removes between wc and target
2844 clean |= dsremoved - removed
2840 clean |= dsremoved - removed
2845 dsremoved &= removed
2841 dsremoved &= removed
2846 # distinguish between dirstate removes and the others
2842 # distinguish between dirstate removes and the others
2847 removed -= dsremoved
2843 removed -= dsremoved
2848
2844
2849 modadded = added & dsmodified
2845 modadded = added & dsmodified
2850 added -= modadded
2846 added -= modadded
2851
2847
2852 # tell newly modified apart.
2848 # tell newly modified apart.
2853 dsmodified &= modified
2849 dsmodified &= modified
2854 dsmodified |= modified & dsadded # dirstate added may need backup
2850 dsmodified |= modified & dsadded # dirstate added may need backup
2855 modified -= dsmodified
2851 modified -= dsmodified
2856
2852
2857 # We need to wait for some post-processing to update this set
2853 # We need to wait for some post-processing to update this set
2858 # before making the distinction. The dirstate will be used for
2854 # before making the distinction. The dirstate will be used for
2859 # that purpose.
2855 # that purpose.
2860 dsadded = added
2856 dsadded = added
2861
2857
2862 # in case of merge, files that are actually added can be reported as
2858 # in case of merge, files that are actually added can be reported as
2863 # modified; we need to post-process the result
2859 # modified; we need to post-process the result
2864 if p2 != nullid:
2860 if p2 != nullid:
2865 if pmf is None:
2861 if pmf is None:
2866 # only need parent manifest in the merge case,
2862 # only need parent manifest in the merge case,
2867 # so do not read by default
2863 # so do not read by default
2868 pmf = repo[parent].manifest()
2864 pmf = repo[parent].manifest()
2869 mergeadd = dsmodified - set(pmf)
2865 mergeadd = dsmodified - set(pmf)
2870 dsadded |= mergeadd
2866 dsadded |= mergeadd
2871 dsmodified -= mergeadd
2867 dsmodified -= mergeadd
2872
2868
2873 # if f is a rename, update `names` to also revert the source
2869 # if f is a rename, update `names` to also revert the source
2874 cwd = repo.getcwd()
2870 cwd = repo.getcwd()
2875 for f in localchanges:
2871 for f in localchanges:
2876 src = repo.dirstate.copied(f)
2872 src = repo.dirstate.copied(f)
2877 # XXX should we check for rename down to target node?
2873 # XXX should we check for rename down to target node?
2878 if src and src not in names and repo.dirstate[src] == 'r':
2874 if src and src not in names and repo.dirstate[src] == 'r':
2879 dsremoved.add(src)
2875 dsremoved.add(src)
2880 names[src] = (repo.pathto(src, cwd), True)
2876 names[src] = (repo.pathto(src, cwd), True)
2881
2877
2882 # distinguish between files to forget and the others
2878 # distinguish between files to forget and the others
2883 added = set()
2879 added = set()
2884 for abs in dsadded:
2880 for abs in dsadded:
2885 if repo.dirstate[abs] != 'a':
2881 if repo.dirstate[abs] != 'a':
2886 added.add(abs)
2882 added.add(abs)
2887 dsadded -= added
2883 dsadded -= added
2888
2884
2889 for abs in deladded:
2885 for abs in deladded:
2890 if repo.dirstate[abs] == 'a':
2886 if repo.dirstate[abs] == 'a':
2891 dsadded.add(abs)
2887 dsadded.add(abs)
2892 deladded -= dsadded
2888 deladded -= dsadded
2893
2889
2894 # For files marked as removed, we check if an unknown file is present at
2890 # For files marked as removed, we check if an unknown file is present at
2895 # the same path. If such a file exists, it may need to be backed up.
2891 # the same path. If such a file exists, it may need to be backed up.
2896 # Making the distinction at this stage helps keep the backup
2892 # Making the distinction at this stage helps keep the backup
2897 # logic simpler.
2893 # logic simpler.
2898 removunk = set()
2894 removunk = set()
2899 for abs in removed:
2895 for abs in removed:
2900 target = repo.wjoin(abs)
2896 target = repo.wjoin(abs)
2901 if os.path.lexists(target):
2897 if os.path.lexists(target):
2902 removunk.add(abs)
2898 removunk.add(abs)
2903 removed -= removunk
2899 removed -= removunk
2904
2900
2905 dsremovunk = set()
2901 dsremovunk = set()
2906 for abs in dsremoved:
2902 for abs in dsremoved:
2907 target = repo.wjoin(abs)
2903 target = repo.wjoin(abs)
2908 if os.path.lexists(target):
2904 if os.path.lexists(target):
2909 dsremovunk.add(abs)
2905 dsremovunk.add(abs)
2910 dsremoved -= dsremovunk
2906 dsremoved -= dsremovunk
2911
2907
2912 # action to be actually performed by revert
2908 # action to be actually performed by revert
2913 # (<list of files>, <message>) tuple
2909 # (<list of files>, <message>) tuple
2914 actions = {'revert': ([], _('reverting %s\n')),
2910 actions = {'revert': ([], _('reverting %s\n')),
2915 'add': ([], _('adding %s\n')),
2911 'add': ([], _('adding %s\n')),
2916 'remove': ([], _('removing %s\n')),
2912 'remove': ([], _('removing %s\n')),
2917 'drop': ([], _('removing %s\n')),
2913 'drop': ([], _('removing %s\n')),
2918 'forget': ([], _('forgetting %s\n')),
2914 'forget': ([], _('forgetting %s\n')),
2919 'undelete': ([], _('undeleting %s\n')),
2915 'undelete': ([], _('undeleting %s\n')),
2920 'noop': (None, _('no changes needed to %s\n')),
2916 'noop': (None, _('no changes needed to %s\n')),
2921 'unknown': (None, _('file not managed: %s\n')),
2917 'unknown': (None, _('file not managed: %s\n')),
2922 }
2918 }
2923
2919
2924 # "constant" that convey the backup strategy.
2920 # "constant" that convey the backup strategy.
2925 # All set to `discard` if `no-backup` is set do avoid checking
2921 # All set to `discard` if `no-backup` is set do avoid checking
2926 # no_backup lower in the code.
2922 # no_backup lower in the code.
2927 # These values are ordered for comparison purposes
2923 # These values are ordered for comparison purposes
2928 backup = 2 # unconditionally do backup
2924 backup = 2 # unconditionally do backup
2929 check = 1 # check if the existing file differs from target
2925 check = 1 # check if the existing file differs from target
2930 discard = 0 # never do backup
2926 discard = 0 # never do backup
2931 if opts.get('no_backup'):
2927 if opts.get('no_backup'):
2932 backup = check = discard
2928 backup = check = discard
2933
2929
2934 backupanddel = actions['remove']
2930 backupanddel = actions['remove']
2935 if not opts.get('no_backup'):
2931 if not opts.get('no_backup'):
2936 backupanddel = actions['drop']
2932 backupanddel = actions['drop']
2937
2933
2938 disptable = (
2934 disptable = (
2939 # dispatch table:
2935 # dispatch table:
2940 # file state
2936 # file state
2941 # action
2937 # action
2942 # make backup
2938 # make backup
2943
2939
2944 ## Sets that result in changes to files on disk
2940 ## Sets that result in changes to files on disk
2945 # Modified compared to target, no local change
2941 # Modified compared to target, no local change
2946 (modified, actions['revert'], discard),
2942 (modified, actions['revert'], discard),
2947 # Modified compared to target, but local file is deleted
2943 # Modified compared to target, but local file is deleted
2948 (deleted, actions['revert'], discard),
2944 (deleted, actions['revert'], discard),
2949 # Modified compared to target, local change
2945 # Modified compared to target, local change
2950 (dsmodified, actions['revert'], backup),
2946 (dsmodified, actions['revert'], backup),
2951 # Added since target
2947 # Added since target
2952 (added, actions['remove'], discard),
2948 (added, actions['remove'], discard),
2953 # Added in working directory
2949 # Added in working directory
2954 (dsadded, actions['forget'], discard),
2950 (dsadded, actions['forget'], discard),
2955 # Added since target, have local modification
2951 # Added since target, have local modification
2956 (modadded, backupanddel, backup),
2952 (modadded, backupanddel, backup),
2957 # Added since target but file is missing in working directory
2953 # Added since target but file is missing in working directory
2958 (deladded, actions['drop'], discard),
2954 (deladded, actions['drop'], discard),
2959 # Removed since target, before working copy parent
2955 # Removed since target, before working copy parent
2960 (removed, actions['add'], discard),
2956 (removed, actions['add'], discard),
2961 # Same as `removed` but an unknown file exists at the same path
2957 # Same as `removed` but an unknown file exists at the same path
2962 (removunk, actions['add'], check),
2958 (removunk, actions['add'], check),
2963 # Removed since target, marked as such in working copy parent
2959 # Removed since target, marked as such in working copy parent
2964 (dsremoved, actions['undelete'], discard),
2960 (dsremoved, actions['undelete'], discard),
2965 # Same as `dsremoved` but an unknown file exists at the same path
2961 # Same as `dsremoved` but an unknown file exists at the same path
2966 (dsremovunk, actions['undelete'], check),
2962 (dsremovunk, actions['undelete'], check),
2967 ## the following sets do not result in any file changes
2963 ## the following sets do not result in any file changes
2968 # File with no modification
2964 # File with no modification
2969 (clean, actions['noop'], discard),
2965 (clean, actions['noop'], discard),
2970 # Existing file, not tracked anywhere
2966 # Existing file, not tracked anywhere
2971 (unknown, actions['unknown'], discard),
2967 (unknown, actions['unknown'], discard),
2972 )
2968 )
2973
2969
2974 wctx = repo[None]
2970 wctx = repo[None]
2975 for abs, (rel, exact) in sorted(names.items()):
2971 for abs, (rel, exact) in sorted(names.items()):
2976 # target file to be touched on disk (relative to cwd)
2972 # target file to be touched on disk (relative to cwd)
2977 target = repo.wjoin(abs)
2973 target = repo.wjoin(abs)
2978 # search the entry in the dispatch table.
2974 # search the entry in the dispatch table.
2979 # if the file is in any of these sets, it was touched in the working
2975 # if the file is in any of these sets, it was touched in the working
2980 # directory parent and we are sure it needs to be reverted.
2976 # directory parent and we are sure it needs to be reverted.
2981 for table, (xlist, msg), dobackup in disptable:
2977 for table, (xlist, msg), dobackup in disptable:
2982 if abs not in table:
2978 if abs not in table:
2983 continue
2979 continue
2984 if xlist is not None:
2980 if xlist is not None:
2985 xlist.append(abs)
2981 xlist.append(abs)
2986 if dobackup and (backup <= dobackup
2982 if dobackup and (backup <= dobackup
2987 or wctx[abs].cmp(ctx[abs])):
2983 or wctx[abs].cmp(ctx[abs])):
2988 bakname = "%s.orig" % rel
2984 bakname = "%s.orig" % rel
2989 ui.note(_('saving current version of %s as %s\n') %
2985 ui.note(_('saving current version of %s as %s\n') %
2990 (rel, bakname))
2986 (rel, bakname))
2991 if not opts.get('dry_run'):
2987 if not opts.get('dry_run'):
2992 util.rename(target, bakname)
2988 util.rename(target, bakname)
2993 if ui.verbose or not exact:
2989 if ui.verbose or not exact:
2994 if not isinstance(msg, basestring):
2990 if not isinstance(msg, basestring):
2995 msg = msg(abs)
2991 msg = msg(abs)
2996 ui.status(msg % rel)
2992 ui.status(msg % rel)
2997 elif exact:
2993 elif exact:
2998 ui.warn(msg % rel)
2994 ui.warn(msg % rel)
2999 break
2995 break
3000
2996
3001
2997
3002 if not opts.get('dry_run'):
2998 if not opts.get('dry_run'):
3003 needdata = ('revert', 'add', 'undelete')
2999 needdata = ('revert', 'add', 'undelete')
3004 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3000 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3005
3001
3006 _performrevert(repo, parents, ctx, actions)
3002 _performrevert(repo, parents, ctx, actions)
3007
3003
3008 # get the list of subrepos that must be reverted
3004 # get the list of subrepos that must be reverted
3009 subrepomatch = scmutil.match(ctx, pats, opts)
3005 subrepomatch = scmutil.match(ctx, pats, opts)
3010 targetsubs = sorted(s for s in ctx.substate if subrepomatch(s))
3006 targetsubs = sorted(s for s in ctx.substate if subrepomatch(s))
3011
3007
3012 if targetsubs:
3008 if targetsubs:
3013 # Revert the subrepos on the revert list
3009 # Revert the subrepos on the revert list
3014 for sub in targetsubs:
3010 for sub in targetsubs:
3015 ctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3011 ctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3016 finally:
3012 finally:
3017 wlock.release()
3013 wlock.release()
3018
3014
3019 def _revertprefetch(repo, ctx, *files):
3015 def _revertprefetch(repo, ctx, *files):
3020 """Let extension changing the storage layer prefetch content"""
3016 """Let extension changing the storage layer prefetch content"""
3021 pass
3017 pass
3022
3018
3023 def _performrevert(repo, parents, ctx, actions):
3019 def _performrevert(repo, parents, ctx, actions):
3024 """function that actually perform all the actions computed for revert
3020 """function that actually perform all the actions computed for revert
3025
3021
3026 This is an independent function to let extensions plug in and react to
3022 This is an independent function to let extensions plug in and react to
3027 the imminent revert.
3023 the imminent revert.
3028
3024
3029 Make sure you have the working directory locked when calling this function.
3025 Make sure you have the working directory locked when calling this function.
3030 """
3026 """
3031 parent, p2 = parents
3027 parent, p2 = parents
3032 node = ctx.node()
3028 node = ctx.node()
3033 def checkout(f):
3029 def checkout(f):
3034 fc = ctx[f]
3030 fc = ctx[f]
3035 repo.wwrite(f, fc.data(), fc.flags())
3031 repo.wwrite(f, fc.data(), fc.flags())
3036
3032
3037 audit_path = pathutil.pathauditor(repo.root)
3033 audit_path = pathutil.pathauditor(repo.root)
3038 for f in actions['forget'][0]:
3034 for f in actions['forget'][0]:
3039 repo.dirstate.drop(f)
3035 repo.dirstate.drop(f)
3040 for f in actions['remove'][0]:
3036 for f in actions['remove'][0]:
3041 audit_path(f)
3037 audit_path(f)
3042 util.unlinkpath(repo.wjoin(f))
3038 util.unlinkpath(repo.wjoin(f))
3043 repo.dirstate.remove(f)
3039 repo.dirstate.remove(f)
3044 for f in actions['drop'][0]:
3040 for f in actions['drop'][0]:
3045 audit_path(f)
3041 audit_path(f)
3046 repo.dirstate.remove(f)
3042 repo.dirstate.remove(f)
3047
3043
3048 normal = None
3044 normal = None
3049 if node == parent:
3045 if node == parent:
3050 # We're reverting to our parent. If possible, we'd like status
3046 # We're reverting to our parent. If possible, we'd like status
3051 # to report the file as clean. We have to use normallookup for
3047 # to report the file as clean. We have to use normallookup for
3052 # merges to avoid losing information about merged/dirty files.
3048 # merges to avoid losing information about merged/dirty files.
3053 if p2 != nullid:
3049 if p2 != nullid:
3054 normal = repo.dirstate.normallookup
3050 normal = repo.dirstate.normallookup
3055 else:
3051 else:
3056 normal = repo.dirstate.normal
3052 normal = repo.dirstate.normal
3057 for f in actions['revert'][0]:
3053 for f in actions['revert'][0]:
3058 checkout(f)
3054 checkout(f)
3059 if normal:
3055 if normal:
3060 normal(f)
3056 normal(f)
3061
3057
3062 for f in actions['add'][0]:
3058 for f in actions['add'][0]:
3063 checkout(f)
3059 checkout(f)
3064 repo.dirstate.add(f)
3060 repo.dirstate.add(f)
3065
3061
3066 normal = repo.dirstate.normallookup
3062 normal = repo.dirstate.normallookup
3067 if node == parent and p2 == nullid:
3063 if node == parent and p2 == nullid:
3068 normal = repo.dirstate.normal
3064 normal = repo.dirstate.normal
3069 for f in actions['undelete'][0]:
3065 for f in actions['undelete'][0]:
3070 checkout(f)
3066 checkout(f)
3071 normal(f)
3067 normal(f)
3072
3068
3073 copied = copies.pathcopies(repo[parent], ctx)
3069 copied = copies.pathcopies(repo[parent], ctx)
3074
3070
3075 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3071 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3076 if f in copied:
3072 if f in copied:
3077 repo.dirstate.copy(copied[f], f)
3073 repo.dirstate.copy(copied[f], f)
3078
3074
3079 def command(table):
3075 def command(table):
3080 """Returns a function object to be used as a decorator for making commands.
3076 """Returns a function object to be used as a decorator for making commands.
3081
3077
3082 This function receives a command table as its argument. The table should
3078 This function receives a command table as its argument. The table should
3083 be a dict.
3079 be a dict.
3084
3080
3085 The returned function can be used as a decorator for adding commands
3081 The returned function can be used as a decorator for adding commands
3086 to that command table. This function accepts multiple arguments to define
3082 to that command table. This function accepts multiple arguments to define
3087 a command.
3083 a command.
3088
3084
3089 The first argument is the command name.
3085 The first argument is the command name.
3090
3086
3091 The options argument is an iterable of tuples defining command arguments.
3087 The options argument is an iterable of tuples defining command arguments.
3092 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3088 See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.
3093
3089
3094 The synopsis argument defines a short, one line summary of how to use the
3090 The synopsis argument defines a short, one line summary of how to use the
3095 command. This shows up in the help output.
3091 command. This shows up in the help output.
3096
3092
3097 The norepo argument defines whether the command does not require a
3093 The norepo argument defines whether the command does not require a
3098 local repository. Most commands operate against a repository, thus the
3094 local repository. Most commands operate against a repository, thus the
3099 default is False.
3095 default is False.
3100
3096
3101 The optionalrepo argument defines whether the command optionally requires
3097 The optionalrepo argument defines whether the command optionally requires
3102 a local repository.
3098 a local repository.
3103
3099
3104 The inferrepo argument defines whether to try to find a repository from the
3100 The inferrepo argument defines whether to try to find a repository from the
3105 command line arguments. If True, arguments will be examined for potential
3101 command line arguments. If True, arguments will be examined for potential
3106 repository locations. See ``findrepo()``. If a repository is found, it
3102 repository locations. See ``findrepo()``. If a repository is found, it
3107 will be used.
3103 will be used.
3108 """
3104 """
3109 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3105 def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
3110 inferrepo=False):
3106 inferrepo=False):
3111 def decorator(func):
3107 def decorator(func):
3112 if synopsis:
3108 if synopsis:
3113 table[name] = func, list(options), synopsis
3109 table[name] = func, list(options), synopsis
3114 else:
3110 else:
3115 table[name] = func, list(options)
3111 table[name] = func, list(options)
3116
3112
3117 if norepo:
3113 if norepo:
3118 # Avoid import cycle.
3114 # Avoid import cycle.
3119 import commands
3115 import commands
3120 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3116 commands.norepo += ' %s' % ' '.join(parsealiases(name))
3121
3117
3122 if optionalrepo:
3118 if optionalrepo:
3123 import commands
3119 import commands
3124 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3120 commands.optionalrepo += ' %s' % ' '.join(parsealiases(name))
3125
3121
3126 if inferrepo:
3122 if inferrepo:
3127 import commands
3123 import commands
3128 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3124 commands.inferrepo += ' %s' % ' '.join(parsealiases(name))
3129
3125
3130 return func
3126 return func
3131 return decorator
3127 return decorator
3132
3128
3133 return cmd
3129 return cmd
3134
3130
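A minimal usage sketch of this factory follows. The command table, command name, option, and function are hypothetical, not part of Mercurial; they only illustrate how the returned decorator populates the table.

cmdtable = {}
mycommand = command(cmdtable)

@mycommand('hello', [('g', 'greeting', 'Hello', _('greeting to use'))],
           _('hg hello [-g TEXT]'))
def hello(ui, repo, **opts):
    """print a greeting (hypothetical example command)"""
    ui.write('%s, world\n' % opts['greeting'])

# cmdtable now maps 'hello' to (hello, [option tuples], synopsis)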
3135 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3131 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3136 # commands.outgoing. "missing" is the "missing" attribute of the result of
3132 # commands.outgoing. "missing" is the "missing" attribute of the result of
3137 # "findcommonoutgoing()"
3133 # "findcommonoutgoing()"
3138 outgoinghooks = util.hooks()
3134 outgoinghooks = util.hooks()
3139
3135
3140 # a list of (ui, repo) functions called by commands.summary
3136 # a list of (ui, repo) functions called by commands.summary
3141 summaryhooks = util.hooks()
3137 summaryhooks = util.hooks()
3142
3138
3143 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3139 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3144 #
3140 #
3145 # functions should return tuple of booleans below, if 'changes' is None:
3141 # functions should return tuple of booleans below, if 'changes' is None:
3146 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3142 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3147 #
3143 #
3148 # otherwise, 'changes' is a tuple of tuples below:
3144 # otherwise, 'changes' is a tuple of tuples below:
3149 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3145 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3150 # - (desturl, destbranch, destpeer, outgoing)
3146 # - (desturl, destbranch, destpeer, outgoing)
3151 summaryremotehooks = util.hooks()
3147 summaryremotehooks = util.hooks()
3152
3148
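A sketch of a function following the contract described above; the body and names are assumptions, and how it would get registered on summaryremotehooks is left out. Only the signature and the return convention for the 'changes is None' case come from the comment:

def _remotesummary(ui, repo, opts, changes):
    if changes is None:
        # first pass: (need incoming data?, need outgoing data?)
        return (False, True)
    source, dest = changes
    desturl, destbranch, destpeer, outgoing = dest
    ui.status('outgoing changes would be pushed to %s\n' % desturl)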
3153 # A list of state files kept by multistep operations like graft.
3149 # A list of state files kept by multistep operations like graft.
3154 # Since graft cannot be aborted, it is considered 'clearable' by update.
3150 # Since graft cannot be aborted, it is considered 'clearable' by update.
3155 # note: bisect is intentionally excluded
3151 # note: bisect is intentionally excluded
3156 # (state file, clearable, allowcommit, error, hint)
3152 # (state file, clearable, allowcommit, error, hint)
3157 unfinishedstates = [
3153 unfinishedstates = [
3158 ('graftstate', True, False, _('graft in progress'),
3154 ('graftstate', True, False, _('graft in progress'),
3159 _("use 'hg graft --continue' or 'hg update' to abort")),
3155 _("use 'hg graft --continue' or 'hg update' to abort")),
3160 ('updatestate', True, False, _('last update was interrupted'),
3156 ('updatestate', True, False, _('last update was interrupted'),
3161 _("use 'hg update' to get a consistent checkout"))
3157 _("use 'hg update' to get a consistent checkout"))
3162 ]
3158 ]
3163
3159
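An extension tracking its own multistep state could, in principle, append an entry of the same (state file, clearable, allowcommit, error, hint) shape; the 'hg example' command and state file below are purely hypothetical:

unfinishedstates.append(
    ('examplestate', True, False, _('example operation in progress'),
     _("use 'hg example --continue' or 'hg example --abort'")))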
3164 def checkunfinished(repo, commit=False):
3160 def checkunfinished(repo, commit=False):
3165 '''Look for an unfinished multistep operation, like graft, and abort
3161 '''Look for an unfinished multistep operation, like graft, and abort
3166 if found. It's probably good to check this right before
3162 if found. It's probably good to check this right before
3167 bailifchanged().
3163 bailifchanged().
3168 '''
3164 '''
3169 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3165 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3170 if commit and allowcommit:
3166 if commit and allowcommit:
3171 continue
3167 continue
3172 if repo.vfs.exists(f):
3168 if repo.vfs.exists(f):
3173 raise util.Abort(msg, hint=hint)
3169 raise util.Abort(msg, hint=hint)
3174
3170
3175 def clearunfinished(repo):
3171 def clearunfinished(repo):
3176 '''Check for unfinished operations (as above), and clear the ones
3172 '''Check for unfinished operations (as above), and clear the ones
3177 that are clearable.
3173 that are clearable.
3178 '''
3174 '''
3179 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3175 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3180 if not clearable and repo.vfs.exists(f):
3176 if not clearable and repo.vfs.exists(f):
3181 raise util.Abort(msg, hint=hint)
3177 raise util.Abort(msg, hint=hint)
3182 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3178 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3183 if clearable and repo.vfs.exists(f):
3179 if clearable and repo.vfs.exists(f):
3184 util.unlink(repo.join(f))
3180 util.unlink(repo.join(f))
@@ -1,2396 +1,2401 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 import cStringIO, email, os, errno, re, posixpath, copy
9 import cStringIO, email, os, errno, re, posixpath, copy
10 import tempfile, zlib, shutil
10 import tempfile, zlib, shutil
11 # On python2.4 you have to import these by name or they fail to
11 # On python2.4 you have to import these by name or they fail to
12 # load. This was not a problem on Python 2.7.
12 # load. This was not a problem on Python 2.7.
13 import email.Generator
13 import email.Generator
14 import email.Parser
14 import email.Parser
15
15
16 from i18n import _
16 from i18n import _
17 from node import hex, short
17 from node import hex, short
18 import cStringIO
18 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19 import base85, mdiff, scmutil, util, diffhelpers, copies, encoding, error
19
20
20 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 gitre = re.compile('diff --git a/(.*) b/(.*)')
21 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22 tabsplitter = re.compile(r'(\t+|[^\t]+)')
22
23
23 class PatchError(Exception):
24 class PatchError(Exception):
24 pass
25 pass
25
26
26
27
27 # public functions
28 # public functions
28
29
29 def split(stream):
30 def split(stream):
30 '''return an iterator of individual patches from a stream'''
31 '''return an iterator of individual patches from a stream'''
31 def isheader(line, inheader):
32 def isheader(line, inheader):
32 if inheader and line[0] in (' ', '\t'):
33 if inheader and line[0] in (' ', '\t'):
33 # continuation
34 # continuation
34 return True
35 return True
35 if line[0] in (' ', '-', '+'):
36 if line[0] in (' ', '-', '+'):
36 # diff line - don't check for header pattern in there
37 # diff line - don't check for header pattern in there
37 return False
38 return False
38 l = line.split(': ', 1)
39 l = line.split(': ', 1)
39 return len(l) == 2 and ' ' not in l[0]
40 return len(l) == 2 and ' ' not in l[0]
40
41
41 def chunk(lines):
42 def chunk(lines):
42 return cStringIO.StringIO(''.join(lines))
43 return cStringIO.StringIO(''.join(lines))
43
44
44 def hgsplit(stream, cur):
45 def hgsplit(stream, cur):
45 inheader = True
46 inheader = True
46
47
47 for line in stream:
48 for line in stream:
48 if not line.strip():
49 if not line.strip():
49 inheader = False
50 inheader = False
50 if not inheader and line.startswith('# HG changeset patch'):
51 if not inheader and line.startswith('# HG changeset patch'):
51 yield chunk(cur)
52 yield chunk(cur)
52 cur = []
53 cur = []
53 inheader = True
54 inheader = True
54
55
55 cur.append(line)
56 cur.append(line)
56
57
57 if cur:
58 if cur:
58 yield chunk(cur)
59 yield chunk(cur)
59
60
60 def mboxsplit(stream, cur):
61 def mboxsplit(stream, cur):
61 for line in stream:
62 for line in stream:
62 if line.startswith('From '):
63 if line.startswith('From '):
63 for c in split(chunk(cur[1:])):
64 for c in split(chunk(cur[1:])):
64 yield c
65 yield c
65 cur = []
66 cur = []
66
67
67 cur.append(line)
68 cur.append(line)
68
69
69 if cur:
70 if cur:
70 for c in split(chunk(cur[1:])):
71 for c in split(chunk(cur[1:])):
71 yield c
72 yield c
72
73
73 def mimesplit(stream, cur):
74 def mimesplit(stream, cur):
74 def msgfp(m):
75 def msgfp(m):
75 fp = cStringIO.StringIO()
76 fp = cStringIO.StringIO()
76 g = email.Generator.Generator(fp, mangle_from_=False)
77 g = email.Generator.Generator(fp, mangle_from_=False)
77 g.flatten(m)
78 g.flatten(m)
78 fp.seek(0)
79 fp.seek(0)
79 return fp
80 return fp
80
81
81 for line in stream:
82 for line in stream:
82 cur.append(line)
83 cur.append(line)
83 c = chunk(cur)
84 c = chunk(cur)
84
85
85 m = email.Parser.Parser().parse(c)
86 m = email.Parser.Parser().parse(c)
86 if not m.is_multipart():
87 if not m.is_multipart():
87 yield msgfp(m)
88 yield msgfp(m)
88 else:
89 else:
89 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
90 for part in m.walk():
91 for part in m.walk():
91 ct = part.get_content_type()
92 ct = part.get_content_type()
92 if ct not in ok_types:
93 if ct not in ok_types:
93 continue
94 continue
94 yield msgfp(part)
95 yield msgfp(part)
95
96
96 def headersplit(stream, cur):
97 def headersplit(stream, cur):
97 inheader = False
98 inheader = False
98
99
99 for line in stream:
100 for line in stream:
100 if not inheader and isheader(line, inheader):
101 if not inheader and isheader(line, inheader):
101 yield chunk(cur)
102 yield chunk(cur)
102 cur = []
103 cur = []
103 inheader = True
104 inheader = True
104 if inheader and not isheader(line, inheader):
105 if inheader and not isheader(line, inheader):
105 inheader = False
106 inheader = False
106
107
107 cur.append(line)
108 cur.append(line)
108
109
109 if cur:
110 if cur:
110 yield chunk(cur)
111 yield chunk(cur)
111
112
112 def remainder(cur):
113 def remainder(cur):
113 yield chunk(cur)
114 yield chunk(cur)
114
115
115 class fiter(object):
116 class fiter(object):
116 def __init__(self, fp):
117 def __init__(self, fp):
117 self.fp = fp
118 self.fp = fp
118
119
119 def __iter__(self):
120 def __iter__(self):
120 return self
121 return self
121
122
122 def next(self):
123 def next(self):
123 l = self.fp.readline()
124 l = self.fp.readline()
124 if not l:
125 if not l:
125 raise StopIteration
126 raise StopIteration
126 return l
127 return l
127
128
128 inheader = False
129 inheader = False
129 cur = []
130 cur = []
130
131
131 mimeheaders = ['content-type']
132 mimeheaders = ['content-type']
132
133
133 if not util.safehasattr(stream, 'next'):
134 if not util.safehasattr(stream, 'next'):
134 # http responses, for example, have readline but not next
135 # http responses, for example, have readline but not next
135 stream = fiter(stream)
136 stream = fiter(stream)
136
137
137 for line in stream:
138 for line in stream:
138 cur.append(line)
139 cur.append(line)
139 if line.startswith('# HG changeset patch'):
140 if line.startswith('# HG changeset patch'):
140 return hgsplit(stream, cur)
141 return hgsplit(stream, cur)
141 elif line.startswith('From '):
142 elif line.startswith('From '):
142 return mboxsplit(stream, cur)
143 return mboxsplit(stream, cur)
143 elif isheader(line, inheader):
144 elif isheader(line, inheader):
144 inheader = True
145 inheader = True
145 if line.split(':', 1)[0].lower() in mimeheaders:
146 if line.split(':', 1)[0].lower() in mimeheaders:
146 # let email parser handle this
147 # let email parser handle this
147 return mimesplit(stream, cur)
148 return mimesplit(stream, cur)
148 elif line.startswith('--- ') and inheader:
149 elif line.startswith('--- ') and inheader:
149 # No evil headers seen by diff start, split by hand
150 # No evil headers seen by diff start, split by hand
150 return headersplit(stream, cur)
151 return headersplit(stream, cur)
151 # Not enough info, keep reading
152 # Not enough info, keep reading
152
153
153 # if we are here, we have a very plain patch
154 # if we are here, we have a very plain patch
154 return remainder(cur)
155 return remainder(cur)
155
156
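A small sketch of what split() does with a plain unified diff (the file name and contents are invented). Since no mail headers or '# HG changeset patch' markers are present, the code above falls through to remainder() and yields the whole input as a single chunk:

import cStringIO

plain = ('--- a/example.txt\n'
         '+++ b/example.txt\n'
         '@@ -1 +1 @@\n'
         '-old line\n'
         '+new line\n')
pieces = list(split(cStringIO.StringIO(plain)))
assert len(pieces) == 1 and pieces[0].read() == plain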
156 def extract(ui, fileobj):
157 def extract(ui, fileobj):
157 '''extract patch from data read from fileobj.
158 '''extract patch from data read from fileobj.
158
159
159 patch can be a normal patch or contained in an email message.
160 patch can be a normal patch or contained in an email message.
160
161
161 return tuple (filename, message, user, date, branch, node, p1, p2).
162 return tuple (filename, message, user, date, branch, node, p1, p2).
162 Any item in the returned tuple can be None. If filename is None,
163 Any item in the returned tuple can be None. If filename is None,
163 fileobj did not contain a patch. Caller must unlink filename when done.'''
164 fileobj did not contain a patch. Caller must unlink filename when done.'''
164
165
165 # attempt to detect the start of a patch
166 # attempt to detect the start of a patch
166 # (this heuristic is borrowed from quilt)
167 # (this heuristic is borrowed from quilt)
167 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 diffre = re.compile(r'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
168 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'retrieving revision [0-9]+(\.[0-9]+)*$|'
169 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'---[ \t].*?^\+\+\+[ \t]|'
170 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171 r'\*\*\*[ \t].*?^---[ \t])', re.MULTILINE|re.DOTALL)
171
172
172 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
173 tmpfp = os.fdopen(fd, 'w')
174 tmpfp = os.fdopen(fd, 'w')
174 try:
175 try:
175 msg = email.Parser.Parser().parse(fileobj)
176 msg = email.Parser.Parser().parse(fileobj)
176
177
177 subject = msg['Subject']
178 subject = msg['Subject']
178 user = msg['From']
179 user = msg['From']
179 if not subject and not user:
180 if not subject and not user:
180 # Not an email, restore parsed headers if any
181 # Not an email, restore parsed headers if any
181 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
182
183
183 # should try to parse msg['Date']
184 # should try to parse msg['Date']
184 date = None
185 date = None
185 nodeid = None
186 nodeid = None
186 branch = None
187 branch = None
187 parents = []
188 parents = []
188
189
189 if subject:
190 if subject:
190 if subject.startswith('[PATCH'):
191 if subject.startswith('[PATCH'):
191 pend = subject.find(']')
192 pend = subject.find(']')
192 if pend >= 0:
193 if pend >= 0:
193 subject = subject[pend + 1:].lstrip()
194 subject = subject[pend + 1:].lstrip()
194 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 subject = re.sub(r'\n[ \t]+', ' ', subject)
195 ui.debug('Subject: %s\n' % subject)
196 ui.debug('Subject: %s\n' % subject)
196 if user:
197 if user:
197 ui.debug('From: %s\n' % user)
198 ui.debug('From: %s\n' % user)
198 diffs_seen = 0
199 diffs_seen = 0
199 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
200 message = ''
201 message = ''
201 for part in msg.walk():
202 for part in msg.walk():
202 content_type = part.get_content_type()
203 content_type = part.get_content_type()
203 ui.debug('Content-Type: %s\n' % content_type)
204 ui.debug('Content-Type: %s\n' % content_type)
204 if content_type not in ok_types:
205 if content_type not in ok_types:
205 continue
206 continue
206 payload = part.get_payload(decode=True)
207 payload = part.get_payload(decode=True)
207 m = diffre.search(payload)
208 m = diffre.search(payload)
208 if m:
209 if m:
209 hgpatch = False
210 hgpatch = False
210 hgpatchheader = False
211 hgpatchheader = False
211 ignoretext = False
212 ignoretext = False
212
213
213 ui.debug('found patch at byte %d\n' % m.start(0))
214 ui.debug('found patch at byte %d\n' % m.start(0))
214 diffs_seen += 1
215 diffs_seen += 1
215 cfp = cStringIO.StringIO()
216 cfp = cStringIO.StringIO()
216 for line in payload[:m.start(0)].splitlines():
217 for line in payload[:m.start(0)].splitlines():
217 if line.startswith('# HG changeset patch') and not hgpatch:
218 if line.startswith('# HG changeset patch') and not hgpatch:
218 ui.debug('patch generated by hg export\n')
219 ui.debug('patch generated by hg export\n')
219 hgpatch = True
220 hgpatch = True
220 hgpatchheader = True
221 hgpatchheader = True
221 # drop earlier commit message content
222 # drop earlier commit message content
222 cfp.seek(0)
223 cfp.seek(0)
223 cfp.truncate()
224 cfp.truncate()
224 subject = None
225 subject = None
225 elif hgpatchheader:
226 elif hgpatchheader:
226 if line.startswith('# User '):
227 if line.startswith('# User '):
227 user = line[7:]
228 user = line[7:]
228 ui.debug('From: %s\n' % user)
229 ui.debug('From: %s\n' % user)
229 elif line.startswith("# Date "):
230 elif line.startswith("# Date "):
230 date = line[7:]
231 date = line[7:]
231 elif line.startswith("# Branch "):
232 elif line.startswith("# Branch "):
232 branch = line[9:]
233 branch = line[9:]
233 elif line.startswith("# Node ID "):
234 elif line.startswith("# Node ID "):
234 nodeid = line[10:]
235 nodeid = line[10:]
235 elif line.startswith("# Parent "):
236 elif line.startswith("# Parent "):
236 parents.append(line[9:].lstrip())
237 parents.append(line[9:].lstrip())
237 elif not line.startswith("# "):
238 elif not line.startswith("# "):
238 hgpatchheader = False
239 hgpatchheader = False
239 elif line == '---':
240 elif line == '---':
240 ignoretext = True
241 ignoretext = True
241 if not hgpatchheader and not ignoretext:
242 if not hgpatchheader and not ignoretext:
242 cfp.write(line)
243 cfp.write(line)
243 cfp.write('\n')
244 cfp.write('\n')
244 message = cfp.getvalue()
245 message = cfp.getvalue()
245 if tmpfp:
246 if tmpfp:
246 tmpfp.write(payload)
247 tmpfp.write(payload)
247 if not payload.endswith('\n'):
248 if not payload.endswith('\n'):
248 tmpfp.write('\n')
249 tmpfp.write('\n')
249 elif not diffs_seen and message and content_type == 'text/plain':
250 elif not diffs_seen and message and content_type == 'text/plain':
250 message += '\n' + payload
251 message += '\n' + payload
251 except: # re-raises
252 except: # re-raises
252 tmpfp.close()
253 tmpfp.close()
253 os.unlink(tmpname)
254 os.unlink(tmpname)
254 raise
255 raise
255
256
256 if subject and not message.startswith(subject):
257 if subject and not message.startswith(subject):
257 message = '%s\n%s' % (subject, message)
258 message = '%s\n%s' % (subject, message)
258 tmpfp.close()
259 tmpfp.close()
259 if not diffs_seen:
260 if not diffs_seen:
260 os.unlink(tmpname)
261 os.unlink(tmpname)
261 return None, message, user, date, branch, None, None, None
262 return None, message, user, date, branch, None, None, None
262
263
263 if parents:
264 if parents:
264 p1 = parents.pop(0)
265 p1 = parents.pop(0)
265 else:
266 else:
266 p1 = None
267 p1 = None
267
268
268 if parents:
269 if parents:
269 p2 = parents.pop(0)
270 p2 = parents.pop(0)
270 else:
271 else:
271 p2 = None
272 p2 = None
272
273
273 return tmpname, message, user, date, branch, nodeid, p1, p2
274 return tmpname, message, user, date, branch, nodeid, p1, p2
274
275
275 class patchmeta(object):
276 class patchmeta(object):
276 """Patched file metadata
277 """Patched file metadata
277
278
278 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
279 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
279 or COPY. 'path' is the patched file path. 'oldpath' is set to the
280 or COPY. 'path' is the patched file path. 'oldpath' is set to the
280 origin file when 'op' is either COPY or RENAME, None otherwise. If
281 origin file when 'op' is either COPY or RENAME, None otherwise. If
281 file mode is changed, 'mode' is a tuple (islink, isexec) where
282 file mode is changed, 'mode' is a tuple (islink, isexec) where
282 'islink' is True if the file is a symlink and 'isexec' is True if
283 'islink' is True if the file is a symlink and 'isexec' is True if
283 the file is executable. Otherwise, 'mode' is None.
284 the file is executable. Otherwise, 'mode' is None.
284 """
285 """
285 def __init__(self, path):
286 def __init__(self, path):
286 self.path = path
287 self.path = path
287 self.oldpath = None
288 self.oldpath = None
288 self.mode = None
289 self.mode = None
289 self.op = 'MODIFY'
290 self.op = 'MODIFY'
290 self.binary = False
291 self.binary = False
291
292
292 def setmode(self, mode):
293 def setmode(self, mode):
293 islink = mode & 020000
294 islink = mode & 020000
294 isexec = mode & 0100
295 isexec = mode & 0100
295 self.mode = (islink, isexec)
296 self.mode = (islink, isexec)
296
297
297 def copy(self):
298 def copy(self):
298 other = patchmeta(self.path)
299 other = patchmeta(self.path)
299 other.oldpath = self.oldpath
300 other.oldpath = self.oldpath
300 other.mode = self.mode
301 other.mode = self.mode
301 other.op = self.op
302 other.op = self.op
302 other.binary = self.binary
303 other.binary = self.binary
303 return other
304 return other
304
305
305 def _ispatchinga(self, afile):
306 def _ispatchinga(self, afile):
306 if afile == '/dev/null':
307 if afile == '/dev/null':
307 return self.op == 'ADD'
308 return self.op == 'ADD'
308 return afile == 'a/' + (self.oldpath or self.path)
309 return afile == 'a/' + (self.oldpath or self.path)
309
310
310 def _ispatchingb(self, bfile):
311 def _ispatchingb(self, bfile):
311 if bfile == '/dev/null':
312 if bfile == '/dev/null':
312 return self.op == 'DELETE'
313 return self.op == 'DELETE'
313 return bfile == 'b/' + self.path
314 return bfile == 'b/' + self.path
314
315
315 def ispatching(self, afile, bfile):
316 def ispatching(self, afile, bfile):
316 return self._ispatchinga(afile) and self._ispatchingb(bfile)
317 return self._ispatchinga(afile) and self._ispatchingb(bfile)
317
318
318 def __repr__(self):
319 def __repr__(self):
319 return "<patchmeta %s %r>" % (self.op, self.path)
320 return "<patchmeta %s %r>" % (self.op, self.path)
320
321
321 def readgitpatch(lr):
322 def readgitpatch(lr):
322 """extract git-style metadata about patches from <patchname>"""
323 """extract git-style metadata about patches from <patchname>"""
323
324
324 # Filter patch for git information
325 # Filter patch for git information
325 gp = None
326 gp = None
326 gitpatches = []
327 gitpatches = []
327 for line in lr:
328 for line in lr:
328 line = line.rstrip(' \r\n')
329 line = line.rstrip(' \r\n')
329 if line.startswith('diff --git a/'):
330 if line.startswith('diff --git a/'):
330 m = gitre.match(line)
331 m = gitre.match(line)
331 if m:
332 if m:
332 if gp:
333 if gp:
333 gitpatches.append(gp)
334 gitpatches.append(gp)
334 dst = m.group(2)
335 dst = m.group(2)
335 gp = patchmeta(dst)
336 gp = patchmeta(dst)
336 elif gp:
337 elif gp:
337 if line.startswith('--- '):
338 if line.startswith('--- '):
338 gitpatches.append(gp)
339 gitpatches.append(gp)
339 gp = None
340 gp = None
340 continue
341 continue
341 if line.startswith('rename from '):
342 if line.startswith('rename from '):
342 gp.op = 'RENAME'
343 gp.op = 'RENAME'
343 gp.oldpath = line[12:]
344 gp.oldpath = line[12:]
344 elif line.startswith('rename to '):
345 elif line.startswith('rename to '):
345 gp.path = line[10:]
346 gp.path = line[10:]
346 elif line.startswith('copy from '):
347 elif line.startswith('copy from '):
347 gp.op = 'COPY'
348 gp.op = 'COPY'
348 gp.oldpath = line[10:]
349 gp.oldpath = line[10:]
349 elif line.startswith('copy to '):
350 elif line.startswith('copy to '):
350 gp.path = line[8:]
351 gp.path = line[8:]
351 elif line.startswith('deleted file'):
352 elif line.startswith('deleted file'):
352 gp.op = 'DELETE'
353 gp.op = 'DELETE'
353 elif line.startswith('new file mode '):
354 elif line.startswith('new file mode '):
354 gp.op = 'ADD'
355 gp.op = 'ADD'
355 gp.setmode(int(line[-6:], 8))
356 gp.setmode(int(line[-6:], 8))
356 elif line.startswith('new mode '):
357 elif line.startswith('new mode '):
357 gp.setmode(int(line[-6:], 8))
358 gp.setmode(int(line[-6:], 8))
358 elif line.startswith('GIT binary patch'):
359 elif line.startswith('GIT binary patch'):
359 gp.binary = True
360 gp.binary = True
360 if gp:
361 if gp:
361 gitpatches.append(gp)
362 gitpatches.append(gp)
362
363
363 return gitpatches
364 return gitpatches
364
365
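A sketch of what readgitpatch() extracts from a small git-style rename header (the paths are invented):

lines = iter(['diff --git a/old.txt b/new.txt\n',
              'rename from old.txt\n',
              'rename to new.txt\n'])
gp = readgitpatch(lines)[0]
# gp.op == 'RENAME', gp.oldpath == 'old.txt', gp.path == 'new.txt'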
365 class linereader(object):
366 class linereader(object):
366 # simple class to allow pushing lines back into the input stream
367 # simple class to allow pushing lines back into the input stream
367 def __init__(self, fp):
368 def __init__(self, fp):
368 self.fp = fp
369 self.fp = fp
369 self.buf = []
370 self.buf = []
370
371
371 def push(self, line):
372 def push(self, line):
372 if line is not None:
373 if line is not None:
373 self.buf.append(line)
374 self.buf.append(line)
374
375
375 def readline(self):
376 def readline(self):
376 if self.buf:
377 if self.buf:
377 l = self.buf[0]
378 l = self.buf[0]
378 del self.buf[0]
379 del self.buf[0]
379 return l
380 return l
380 return self.fp.readline()
381 return self.fp.readline()
381
382
382 def __iter__(self):
383 def __iter__(self):
383 while True:
384 while True:
384 l = self.readline()
385 l = self.readline()
385 if not l:
386 if not l:
386 break
387 break
387 yield l
388 yield l
388
389
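A minimal sketch of the pushback behaviour (the input is made up; cStringIO is already imported at the top of this module):

lr = linereader(cStringIO.StringIO('first\nsecond\n'))
peeked = lr.readline()   # 'first\n'
lr.push(peeked)          # put it back so iteration sees it again
assert list(lr) == ['first\n', 'second\n']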
389 class abstractbackend(object):
390 class abstractbackend(object):
390 def __init__(self, ui):
391 def __init__(self, ui):
391 self.ui = ui
392 self.ui = ui
392
393
393 def getfile(self, fname):
394 def getfile(self, fname):
394 """Return target file data and flags as a (data, (islink,
395 """Return target file data and flags as a (data, (islink,
395 isexec)) tuple. Data is None if file is missing/deleted.
396 isexec)) tuple. Data is None if file is missing/deleted.
396 """
397 """
397 raise NotImplementedError
398 raise NotImplementedError
398
399
399 def setfile(self, fname, data, mode, copysource):
400 def setfile(self, fname, data, mode, copysource):
400 """Write data to target file fname and set its mode. mode is a
401 """Write data to target file fname and set its mode. mode is a
401 (islink, isexec) tuple. If data is None, the file content should
402 (islink, isexec) tuple. If data is None, the file content should
402 be left unchanged. If the file is modified after being copied,
403 be left unchanged. If the file is modified after being copied,
403 copysource is set to the original file name.
404 copysource is set to the original file name.
404 """
405 """
405 raise NotImplementedError
406 raise NotImplementedError
406
407
407 def unlink(self, fname):
408 def unlink(self, fname):
408 """Unlink target file."""
409 """Unlink target file."""
409 raise NotImplementedError
410 raise NotImplementedError
410
411
411 def writerej(self, fname, failed, total, lines):
412 def writerej(self, fname, failed, total, lines):
412 """Write rejected lines for fname. total is the number of hunks
413 """Write rejected lines for fname. total is the number of hunks
413 which failed to apply and total the total number of hunks for this
414 which failed to apply and total the total number of hunks for this
414 files.
415 files.
415 """
416 """
416 pass
417 pass
417
418
418 def exists(self, fname):
419 def exists(self, fname):
419 raise NotImplementedError
420 raise NotImplementedError
420
421
421 class fsbackend(abstractbackend):
422 class fsbackend(abstractbackend):
422 def __init__(self, ui, basedir):
423 def __init__(self, ui, basedir):
423 super(fsbackend, self).__init__(ui)
424 super(fsbackend, self).__init__(ui)
424 self.opener = scmutil.opener(basedir)
425 self.opener = scmutil.opener(basedir)
425
426
426 def _join(self, f):
427 def _join(self, f):
427 return os.path.join(self.opener.base, f)
428 return os.path.join(self.opener.base, f)
428
429
429 def getfile(self, fname):
430 def getfile(self, fname):
430 if self.opener.islink(fname):
431 if self.opener.islink(fname):
431 return (self.opener.readlink(fname), (True, False))
432 return (self.opener.readlink(fname), (True, False))
432
433
433 isexec = False
434 isexec = False
434 try:
435 try:
435 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
436 isexec = self.opener.lstat(fname).st_mode & 0100 != 0
436 except OSError, e:
437 except OSError, e:
437 if e.errno != errno.ENOENT:
438 if e.errno != errno.ENOENT:
438 raise
439 raise
439 try:
440 try:
440 return (self.opener.read(fname), (False, isexec))
441 return (self.opener.read(fname), (False, isexec))
441 except IOError, e:
442 except IOError, e:
442 if e.errno != errno.ENOENT:
443 if e.errno != errno.ENOENT:
443 raise
444 raise
444 return None, None
445 return None, None
445
446
446 def setfile(self, fname, data, mode, copysource):
447 def setfile(self, fname, data, mode, copysource):
447 islink, isexec = mode
448 islink, isexec = mode
448 if data is None:
449 if data is None:
449 self.opener.setflags(fname, islink, isexec)
450 self.opener.setflags(fname, islink, isexec)
450 return
451 return
451 if islink:
452 if islink:
452 self.opener.symlink(data, fname)
453 self.opener.symlink(data, fname)
453 else:
454 else:
454 self.opener.write(fname, data)
455 self.opener.write(fname, data)
455 if isexec:
456 if isexec:
456 self.opener.setflags(fname, False, True)
457 self.opener.setflags(fname, False, True)
457
458
458 def unlink(self, fname):
459 def unlink(self, fname):
459 self.opener.unlinkpath(fname, ignoremissing=True)
460 self.opener.unlinkpath(fname, ignoremissing=True)
460
461
461 def writerej(self, fname, failed, total, lines):
462 def writerej(self, fname, failed, total, lines):
462 fname = fname + ".rej"
463 fname = fname + ".rej"
463 self.ui.warn(
464 self.ui.warn(
464 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
465 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
465 (failed, total, fname))
466 (failed, total, fname))
466 fp = self.opener(fname, 'w')
467 fp = self.opener(fname, 'w')
467 fp.writelines(lines)
468 fp.writelines(lines)
468 fp.close()
469 fp.close()
469
470
470 def exists(self, fname):
471 def exists(self, fname):
471 return self.opener.lexists(fname)
472 return self.opener.lexists(fname)
472
473
473 class workingbackend(fsbackend):
474 class workingbackend(fsbackend):
474 def __init__(self, ui, repo, similarity):
475 def __init__(self, ui, repo, similarity):
475 super(workingbackend, self).__init__(ui, repo.root)
476 super(workingbackend, self).__init__(ui, repo.root)
476 self.repo = repo
477 self.repo = repo
477 self.similarity = similarity
478 self.similarity = similarity
478 self.removed = set()
479 self.removed = set()
479 self.changed = set()
480 self.changed = set()
480 self.copied = []
481 self.copied = []
481
482
482 def _checkknown(self, fname):
483 def _checkknown(self, fname):
483 if self.repo.dirstate[fname] == '?' and self.exists(fname):
484 if self.repo.dirstate[fname] == '?' and self.exists(fname):
484 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
485 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
485
486
486 def setfile(self, fname, data, mode, copysource):
487 def setfile(self, fname, data, mode, copysource):
487 self._checkknown(fname)
488 self._checkknown(fname)
488 super(workingbackend, self).setfile(fname, data, mode, copysource)
489 super(workingbackend, self).setfile(fname, data, mode, copysource)
489 if copysource is not None:
490 if copysource is not None:
490 self.copied.append((copysource, fname))
491 self.copied.append((copysource, fname))
491 self.changed.add(fname)
492 self.changed.add(fname)
492
493
493 def unlink(self, fname):
494 def unlink(self, fname):
494 self._checkknown(fname)
495 self._checkknown(fname)
495 super(workingbackend, self).unlink(fname)
496 super(workingbackend, self).unlink(fname)
496 self.removed.add(fname)
497 self.removed.add(fname)
497 self.changed.add(fname)
498 self.changed.add(fname)
498
499
499 def close(self):
500 def close(self):
500 wctx = self.repo[None]
501 wctx = self.repo[None]
501 changed = set(self.changed)
502 changed = set(self.changed)
502 for src, dst in self.copied:
503 for src, dst in self.copied:
503 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
504 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
504 if self.removed:
505 if self.removed:
505 wctx.forget(sorted(self.removed))
506 wctx.forget(sorted(self.removed))
506 for f in self.removed:
507 for f in self.removed:
507 if f not in self.repo.dirstate:
508 if f not in self.repo.dirstate:
508 # File was deleted and no longer belongs to the
509 # File was deleted and no longer belongs to the
509 # dirstate, it was probably marked added then
510 # dirstate, it was probably marked added then
510 # deleted, and should not be considered by
511 # deleted, and should not be considered by
511 # marktouched().
512 # marktouched().
512 changed.discard(f)
513 changed.discard(f)
513 if changed:
514 if changed:
514 scmutil.marktouched(self.repo, changed, self.similarity)
515 scmutil.marktouched(self.repo, changed, self.similarity)
515 return sorted(self.changed)
516 return sorted(self.changed)
516
517
517 class filestore(object):
518 class filestore(object):
518 def __init__(self, maxsize=None):
519 def __init__(self, maxsize=None):
519 self.opener = None
520 self.opener = None
520 self.files = {}
521 self.files = {}
521 self.created = 0
522 self.created = 0
522 self.maxsize = maxsize
523 self.maxsize = maxsize
523 if self.maxsize is None:
524 if self.maxsize is None:
524 self.maxsize = 4*(2**20)
525 self.maxsize = 4*(2**20)
525 self.size = 0
526 self.size = 0
526 self.data = {}
527 self.data = {}
527
528
528 def setfile(self, fname, data, mode, copied=None):
529 def setfile(self, fname, data, mode, copied=None):
529 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
530 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
530 self.data[fname] = (data, mode, copied)
531 self.data[fname] = (data, mode, copied)
531 self.size += len(data)
532 self.size += len(data)
532 else:
533 else:
533 if self.opener is None:
534 if self.opener is None:
534 root = tempfile.mkdtemp(prefix='hg-patch-')
535 root = tempfile.mkdtemp(prefix='hg-patch-')
535 self.opener = scmutil.opener(root)
536 self.opener = scmutil.opener(root)
536 # Avoid filename issues with these simple names
537 # Avoid filename issues with these simple names
537 fn = str(self.created)
538 fn = str(self.created)
538 self.opener.write(fn, data)
539 self.opener.write(fn, data)
539 self.created += 1
540 self.created += 1
540 self.files[fname] = (fn, mode, copied)
541 self.files[fname] = (fn, mode, copied)
541
542
542 def getfile(self, fname):
543 def getfile(self, fname):
543 if fname in self.data:
544 if fname in self.data:
544 return self.data[fname]
545 return self.data[fname]
545 if not self.opener or fname not in self.files:
546 if not self.opener or fname not in self.files:
546 return None, None, None
547 return None, None, None
547 fn, mode, copied = self.files[fname]
548 fn, mode, copied = self.files[fname]
548 return self.opener.read(fn), mode, copied
549 return self.opener.read(fn), mode, copied
549
550
550 def close(self):
551 def close(self):
551 if self.opener:
552 if self.opener:
552 shutil.rmtree(self.opener.base)
553 shutil.rmtree(self.opener.base)
553
554
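# Illustrative sketch only: filestore above keeps small contents in memory
# and spills anything beyond maxsize (4 MiB by default) to a temporary
# directory via scmutil.opener. The standalone toy below mirrors that spill
# behaviour with plain tempfile/open; the name 'spillstore' and the
# 16-byte threshold are made up for the example.
import os, shutil, tempfile

class spillstore(object):
    def __init__(self, maxsize=16):
        self.maxsize = maxsize
        self.size = 0
        self.data = {}    # small payloads kept in memory
        self.files = {}   # fname -> path of a spilled payload
        self.tmpdir = None

    def setfile(self, fname, data):
        if self.size + len(data) <= self.maxsize:
            self.data[fname] = data
            self.size += len(data)
            return
        if self.tmpdir is None:
            self.tmpdir = tempfile.mkdtemp(prefix='hg-patch-')
        path = os.path.join(self.tmpdir, str(len(self.files)))
        fp = open(path, 'wb')
        fp.write(data)
        fp.close()
        self.files[fname] = path

    def getfile(self, fname):
        if fname in self.data:
            return self.data[fname]
        fp = open(self.files[fname], 'rb')
        try:
            return fp.read()
        finally:
            fp.close()

    def close(self):
        if self.tmpdir:
            shutil.rmtree(self.tmpdir)

store = spillstore()
store.setfile('small', b'tiny')        # stays in memory
store.setfile('large', b'x' * 64)      # spilled to the temp dir
assert store.getfile('large') == b'x' * 64
store.close()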
554 class repobackend(abstractbackend):
555 class repobackend(abstractbackend):
555 def __init__(self, ui, repo, ctx, store):
556 def __init__(self, ui, repo, ctx, store):
556 super(repobackend, self).__init__(ui)
557 super(repobackend, self).__init__(ui)
557 self.repo = repo
558 self.repo = repo
558 self.ctx = ctx
559 self.ctx = ctx
559 self.store = store
560 self.store = store
560 self.changed = set()
561 self.changed = set()
561 self.removed = set()
562 self.removed = set()
562 self.copied = {}
563 self.copied = {}
563
564
564 def _checkknown(self, fname):
565 def _checkknown(self, fname):
565 if fname not in self.ctx:
566 if fname not in self.ctx:
566 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
567 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
567
568
568 def getfile(self, fname):
569 def getfile(self, fname):
569 try:
570 try:
570 fctx = self.ctx[fname]
571 fctx = self.ctx[fname]
571 except error.LookupError:
572 except error.LookupError:
572 return None, None
573 return None, None
573 flags = fctx.flags()
574 flags = fctx.flags()
574 return fctx.data(), ('l' in flags, 'x' in flags)
575 return fctx.data(), ('l' in flags, 'x' in flags)
575
576
576 def setfile(self, fname, data, mode, copysource):
577 def setfile(self, fname, data, mode, copysource):
577 if copysource:
578 if copysource:
578 self._checkknown(copysource)
579 self._checkknown(copysource)
579 if data is None:
580 if data is None:
580 data = self.ctx[fname].data()
581 data = self.ctx[fname].data()
581 self.store.setfile(fname, data, mode, copysource)
582 self.store.setfile(fname, data, mode, copysource)
582 self.changed.add(fname)
583 self.changed.add(fname)
583 if copysource:
584 if copysource:
584 self.copied[fname] = copysource
585 self.copied[fname] = copysource
585
586
586 def unlink(self, fname):
587 def unlink(self, fname):
587 self._checkknown(fname)
588 self._checkknown(fname)
588 self.removed.add(fname)
589 self.removed.add(fname)
589
590
590 def exists(self, fname):
591 def exists(self, fname):
591 return fname in self.ctx
592 return fname in self.ctx
592
593
593 def close(self):
594 def close(self):
594 return self.changed | self.removed
595 return self.changed | self.removed
595
596
596 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
597 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
597 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
598 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
598 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
599 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
599 eolmodes = ['strict', 'crlf', 'lf', 'auto']
600 eolmodes = ['strict', 'crlf', 'lf', 'auto']
600
601
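# Quick illustration of what the unidesc regular expression above captures
# from a unified hunk header; the sample header is arbitrary. When the
# length after the comma is omitted, the corresponding group is None and
# read_unified_hunk defaults it to 1.
import re
_unidesc = re.compile(r'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
print(_unidesc.match('@@ -12,5 +14 @@').groups())
# -> ('12', '5', '14', None)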
601 class patchfile(object):
602 class patchfile(object):
602 def __init__(self, ui, gp, backend, store, eolmode='strict'):
603 def __init__(self, ui, gp, backend, store, eolmode='strict'):
603 self.fname = gp.path
604 self.fname = gp.path
604 self.eolmode = eolmode
605 self.eolmode = eolmode
605 self.eol = None
606 self.eol = None
606 self.backend = backend
607 self.backend = backend
607 self.ui = ui
608 self.ui = ui
608 self.lines = []
609 self.lines = []
609 self.exists = False
610 self.exists = False
610 self.missing = True
611 self.missing = True
611 self.mode = gp.mode
612 self.mode = gp.mode
612 self.copysource = gp.oldpath
613 self.copysource = gp.oldpath
613 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
614 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
614 self.remove = gp.op == 'DELETE'
615 self.remove = gp.op == 'DELETE'
615 if self.copysource is None:
616 if self.copysource is None:
616 data, mode = backend.getfile(self.fname)
617 data, mode = backend.getfile(self.fname)
617 else:
618 else:
618 data, mode = store.getfile(self.copysource)[:2]
619 data, mode = store.getfile(self.copysource)[:2]
619 if data is not None:
620 if data is not None:
620 self.exists = self.copysource is None or backend.exists(self.fname)
621 self.exists = self.copysource is None or backend.exists(self.fname)
621 self.missing = False
622 self.missing = False
622 if data:
623 if data:
623 self.lines = mdiff.splitnewlines(data)
624 self.lines = mdiff.splitnewlines(data)
624 if self.mode is None:
625 if self.mode is None:
625 self.mode = mode
626 self.mode = mode
626 if self.lines:
627 if self.lines:
627 # Normalize line endings
628 # Normalize line endings
628 if self.lines[0].endswith('\r\n'):
629 if self.lines[0].endswith('\r\n'):
629 self.eol = '\r\n'
630 self.eol = '\r\n'
630 elif self.lines[0].endswith('\n'):
631 elif self.lines[0].endswith('\n'):
631 self.eol = '\n'
632 self.eol = '\n'
632 if eolmode != 'strict':
633 if eolmode != 'strict':
633 nlines = []
634 nlines = []
634 for l in self.lines:
635 for l in self.lines:
635 if l.endswith('\r\n'):
636 if l.endswith('\r\n'):
636 l = l[:-2] + '\n'
637 l = l[:-2] + '\n'
637 nlines.append(l)
638 nlines.append(l)
638 self.lines = nlines
639 self.lines = nlines
639 else:
640 else:
640 if self.create:
641 if self.create:
641 self.missing = False
642 self.missing = False
642 if self.mode is None:
643 if self.mode is None:
643 self.mode = (False, False)
644 self.mode = (False, False)
644 if self.missing:
645 if self.missing:
645 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
646 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
646
647
647 self.hash = {}
648 self.hash = {}
648 self.dirty = 0
649 self.dirty = 0
649 self.offset = 0
650 self.offset = 0
650 self.skew = 0
651 self.skew = 0
651 self.rej = []
652 self.rej = []
652 self.fileprinted = False
653 self.fileprinted = False
653 self.printfile(False)
654 self.printfile(False)
654 self.hunks = 0
655 self.hunks = 0
655
656
656 def writelines(self, fname, lines, mode):
657 def writelines(self, fname, lines, mode):
657 if self.eolmode == 'auto':
658 if self.eolmode == 'auto':
658 eol = self.eol
659 eol = self.eol
659 elif self.eolmode == 'crlf':
660 elif self.eolmode == 'crlf':
660 eol = '\r\n'
661 eol = '\r\n'
661 else:
662 else:
662 eol = '\n'
663 eol = '\n'
663
664
664 if self.eolmode != 'strict' and eol and eol != '\n':
665 if self.eolmode != 'strict' and eol and eol != '\n':
665 rawlines = []
666 rawlines = []
666 for l in lines:
667 for l in lines:
667 if l and l[-1] == '\n':
668 if l and l[-1] == '\n':
668 l = l[:-1] + eol
669 l = l[:-1] + eol
669 rawlines.append(l)
670 rawlines.append(l)
670 lines = rawlines
671 lines = rawlines
671
672
672 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
673 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
673
674
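# Toy rerun of the line-ending rewrite performed in writelines when the
# effective eol is '\r\n' (eolmode 'crlf', or 'auto' with CRLF input); the
# sample lines are invented for the example.
lines = ['keep\n', 'also keep\n', 'no trailing newline']
eol = '\r\n'
rawlines = []
for l in lines:
    if l and l[-1] == '\n':
        l = l[:-1] + eol
    rawlines.append(l)
print(rawlines)
# -> ['keep\r\n', 'also keep\r\n', 'no trailing newline']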
674 def printfile(self, warn):
675 def printfile(self, warn):
675 if self.fileprinted:
676 if self.fileprinted:
676 return
677 return
677 if warn or self.ui.verbose:
678 if warn or self.ui.verbose:
678 self.fileprinted = True
679 self.fileprinted = True
679 s = _("patching file %s\n") % self.fname
680 s = _("patching file %s\n") % self.fname
680 if warn:
681 if warn:
681 self.ui.warn(s)
682 self.ui.warn(s)
682 else:
683 else:
683 self.ui.note(s)
684 self.ui.note(s)
684
685
685
686
686 def findlines(self, l, linenum):
687 def findlines(self, l, linenum):
687 # looks through the hash and finds candidate lines. The
688 # looks through the hash and finds candidate lines. The
688 # result is a list of line numbers sorted based on distance
689 # result is a list of line numbers sorted based on distance
689 # from linenum
690 # from linenum
690
691
691 cand = self.hash.get(l, [])
692 cand = self.hash.get(l, [])
692 if len(cand) > 1:
693 if len(cand) > 1:
693 # re-sort our list of potentials forward then back.
694 # re-sort our list of potentials forward then back.
694 cand.sort(key=lambda x: abs(x - linenum))
695 cand.sort(key=lambda x: abs(x - linenum))
695 return cand
696 return cand
696
697
697 def write_rej(self):
698 def write_rej(self):
698 # our rejects are a little different from patch(1). This always
699 # our rejects are a little different from patch(1). This always
699 # creates rejects in the same form as the original patch. A file
700 # creates rejects in the same form as the original patch. A file
700 # header is inserted so that you can run the reject through patch again
701 # header is inserted so that you can run the reject through patch again
701 # without having to type the filename.
702 # without having to type the filename.
702 if not self.rej:
703 if not self.rej:
703 return
704 return
704 base = os.path.basename(self.fname)
705 base = os.path.basename(self.fname)
705 lines = ["--- %s\n+++ %s\n" % (base, base)]
706 lines = ["--- %s\n+++ %s\n" % (base, base)]
706 for x in self.rej:
707 for x in self.rej:
707 for l in x.hunk:
708 for l in x.hunk:
708 lines.append(l)
709 lines.append(l)
709 if l[-1] != '\n':
710 if l[-1] != '\n':
710 lines.append("\n\ No newline at end of file\n")
711 lines.append("\n\ No newline at end of file\n")
711 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
712 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
712
713
713 def apply(self, h):
714 def apply(self, h):
714 if not h.complete():
715 if not h.complete():
715 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
716 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
716 (h.number, h.desc, len(h.a), h.lena, len(h.b),
717 (h.number, h.desc, len(h.a), h.lena, len(h.b),
717 h.lenb))
718 h.lenb))
718
719
719 self.hunks += 1
720 self.hunks += 1
720
721
721 if self.missing:
722 if self.missing:
722 self.rej.append(h)
723 self.rej.append(h)
723 return -1
724 return -1
724
725
725 if self.exists and self.create:
726 if self.exists and self.create:
726 if self.copysource:
727 if self.copysource:
727 self.ui.warn(_("cannot create %s: destination already "
728 self.ui.warn(_("cannot create %s: destination already "
728 "exists\n") % self.fname)
729 "exists\n") % self.fname)
729 else:
730 else:
730 self.ui.warn(_("file %s already exists\n") % self.fname)
731 self.ui.warn(_("file %s already exists\n") % self.fname)
731 self.rej.append(h)
732 self.rej.append(h)
732 return -1
733 return -1
733
734
734 if isinstance(h, binhunk):
735 if isinstance(h, binhunk):
735 if self.remove:
736 if self.remove:
736 self.backend.unlink(self.fname)
737 self.backend.unlink(self.fname)
737 else:
738 else:
738 l = h.new(self.lines)
739 l = h.new(self.lines)
739 self.lines[:] = l
740 self.lines[:] = l
740 self.offset += len(l)
741 self.offset += len(l)
741 self.dirty = True
742 self.dirty = True
742 return 0
743 return 0
743
744
744 horig = h
745 horig = h
745 if (self.eolmode in ('crlf', 'lf')
746 if (self.eolmode in ('crlf', 'lf')
746 or self.eolmode == 'auto' and self.eol):
747 or self.eolmode == 'auto' and self.eol):
747 # If new eols are going to be normalized, then normalize
748 # If new eols are going to be normalized, then normalize
748 # hunk data before patching. Otherwise, preserve input
749 # hunk data before patching. Otherwise, preserve input
749 # line-endings.
750 # line-endings.
750 h = h.getnormalized()
751 h = h.getnormalized()
751
752
752 # fast case first, no offsets, no fuzz
753 # fast case first, no offsets, no fuzz
753 old, oldstart, new, newstart = h.fuzzit(0, False)
754 old, oldstart, new, newstart = h.fuzzit(0, False)
754 oldstart += self.offset
755 oldstart += self.offset
755 orig_start = oldstart
756 orig_start = oldstart
756 # if there's skew we want to emit the "(offset %d lines)" even
757 # if there's skew we want to emit the "(offset %d lines)" even
757 # when the hunk cleanly applies at start + skew, so skip the
758 # when the hunk cleanly applies at start + skew, so skip the
758 # fast case code
759 # fast case code
759 if (self.skew == 0 and
760 if (self.skew == 0 and
760 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
761 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
761 if self.remove:
762 if self.remove:
762 self.backend.unlink(self.fname)
763 self.backend.unlink(self.fname)
763 else:
764 else:
764 self.lines[oldstart:oldstart + len(old)] = new
765 self.lines[oldstart:oldstart + len(old)] = new
765 self.offset += len(new) - len(old)
766 self.offset += len(new) - len(old)
766 self.dirty = True
767 self.dirty = True
767 return 0
768 return 0
768
769
769 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
770 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
770 self.hash = {}
771 self.hash = {}
771 for x, s in enumerate(self.lines):
772 for x, s in enumerate(self.lines):
772 self.hash.setdefault(s, []).append(x)
773 self.hash.setdefault(s, []).append(x)
773
774
774 for fuzzlen in xrange(3):
775 for fuzzlen in xrange(3):
775 for toponly in [True, False]:
776 for toponly in [True, False]:
776 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
777 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
777 oldstart = oldstart + self.offset + self.skew
778 oldstart = oldstart + self.offset + self.skew
778 oldstart = min(oldstart, len(self.lines))
779 oldstart = min(oldstart, len(self.lines))
779 if old:
780 if old:
780 cand = self.findlines(old[0][1:], oldstart)
781 cand = self.findlines(old[0][1:], oldstart)
781 else:
782 else:
782 # Only adding lines with no or fuzzed context, just
783 # Only adding lines with no or fuzzed context, just
783 # take the skew into account
784 # take the skew into account
784 cand = [oldstart]
785 cand = [oldstart]
785
786
786 for l in cand:
787 for l in cand:
787 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
788 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
788 self.lines[l : l + len(old)] = new
789 self.lines[l : l + len(old)] = new
789 self.offset += len(new) - len(old)
790 self.offset += len(new) - len(old)
790 self.skew = l - orig_start
791 self.skew = l - orig_start
791 self.dirty = True
792 self.dirty = True
792 offset = l - orig_start - fuzzlen
793 offset = l - orig_start - fuzzlen
793 if fuzzlen:
794 if fuzzlen:
794 msg = _("Hunk #%d succeeded at %d "
795 msg = _("Hunk #%d succeeded at %d "
795 "with fuzz %d "
796 "with fuzz %d "
796 "(offset %d lines).\n")
797 "(offset %d lines).\n")
797 self.printfile(True)
798 self.printfile(True)
798 self.ui.warn(msg %
799 self.ui.warn(msg %
799 (h.number, l + 1, fuzzlen, offset))
800 (h.number, l + 1, fuzzlen, offset))
800 else:
801 else:
801 msg = _("Hunk #%d succeeded at %d "
802 msg = _("Hunk #%d succeeded at %d "
802 "(offset %d lines).\n")
803 "(offset %d lines).\n")
803 self.ui.note(msg % (h.number, l + 1, offset))
804 self.ui.note(msg % (h.number, l + 1, offset))
804 return fuzzlen
805 return fuzzlen
805 self.printfile(True)
806 self.printfile(True)
806 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
807 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
807 self.rej.append(horig)
808 self.rej.append(horig)
808 return -1
809 return -1
809
810
810 def close(self):
811 def close(self):
811 if self.dirty:
812 if self.dirty:
812 self.writelines(self.fname, self.lines, self.mode)
813 self.writelines(self.fname, self.lines, self.mode)
813 self.write_rej()
814 self.write_rej()
814 return len(self.rej)
815 return len(self.rej)
815
816
816 class header(object):
817 class header(object):
817 """patch header
818 """patch header
818 """
819 """
819 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
820 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
820 diff_re = re.compile('diff -r .* (.*)$')
821 diff_re = re.compile('diff -r .* (.*)$')
821 allhunks_re = re.compile('(?:index|deleted file) ')
822 allhunks_re = re.compile('(?:index|deleted file) ')
822 pretty_re = re.compile('(?:new file|deleted file) ')
823 pretty_re = re.compile('(?:new file|deleted file) ')
823 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
824 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
824
825
825 def __init__(self, header):
826 def __init__(self, header):
826 self.header = header
827 self.header = header
827 self.hunks = []
828 self.hunks = []
828
829
829 def binary(self):
830 def binary(self):
830 return util.any(h.startswith('index ') for h in self.header)
831 return util.any(h.startswith('index ') for h in self.header)
831
832
832 def pretty(self, fp):
833 def pretty(self, fp):
833 for h in self.header:
834 for h in self.header:
834 if h.startswith('index '):
835 if h.startswith('index '):
835 fp.write(_('this modifies a binary file (all or nothing)\n'))
836 fp.write(_('this modifies a binary file (all or nothing)\n'))
836 break
837 break
837 if self.pretty_re.match(h):
838 if self.pretty_re.match(h):
838 fp.write(h)
839 fp.write(h)
839 if self.binary():
840 if self.binary():
840 fp.write(_('this is a binary file\n'))
841 fp.write(_('this is a binary file\n'))
841 break
842 break
842 if h.startswith('---'):
843 if h.startswith('---'):
843 fp.write(_('%d hunks, %d lines changed\n') %
844 fp.write(_('%d hunks, %d lines changed\n') %
844 (len(self.hunks),
845 (len(self.hunks),
845 sum([max(h.added, h.removed) for h in self.hunks])))
846 sum([max(h.added, h.removed) for h in self.hunks])))
846 break
847 break
847 fp.write(h)
848 fp.write(h)
848
849
849 def write(self, fp):
850 def write(self, fp):
850 fp.write(''.join(self.header))
851 fp.write(''.join(self.header))
851
852
852 def allhunks(self):
853 def allhunks(self):
853 return util.any(self.allhunks_re.match(h) for h in self.header)
854 return util.any(self.allhunks_re.match(h) for h in self.header)
854
855
855 def files(self):
856 def files(self):
856 match = self.diffgit_re.match(self.header[0])
857 match = self.diffgit_re.match(self.header[0])
857 if match:
858 if match:
858 fromfile, tofile = match.groups()
859 fromfile, tofile = match.groups()
859 if fromfile == tofile:
860 if fromfile == tofile:
860 return [fromfile]
861 return [fromfile]
861 return [fromfile, tofile]
862 return [fromfile, tofile]
862 else:
863 else:
863 return self.diff_re.match(self.header[0]).groups()
864 return self.diff_re.match(self.header[0]).groups()
864
865
865 def filename(self):
866 def filename(self):
866 return self.files()[-1]
867 return self.files()[-1]
867
868
868 def __repr__(self):
869 def __repr__(self):
869 return '<header %s>' % (' '.join(map(repr, self.files())))
870 return '<header %s>' % (' '.join(map(repr, self.files())))
870
871
871 def special(self):
872 def special(self):
872 return util.any(self.special_re.match(h) for h in self.header)
873 return util.any(self.special_re.match(h) for h in self.header)
873
874
874 class recordhunk(object):
875 class recordhunk(object):
875 """patch hunk
876 """patch hunk
876
877
877 XXX shouldn't we merge this with the other hunk class?
878 XXX shouldn't we merge this with the other hunk class?
878 """
879 """
879 maxcontext = 3
880 maxcontext = 3
880
881
881 def __init__(self, header, fromline, toline, proc, before, hunk, after):
882 def __init__(self, header, fromline, toline, proc, before, hunk, after):
882 def trimcontext(number, lines):
883 def trimcontext(number, lines):
883 delta = len(lines) - self.maxcontext
884 delta = len(lines) - self.maxcontext
884 if False and delta > 0:
885 if False and delta > 0:
885 return number + delta, lines[:self.maxcontext]
886 return number + delta, lines[:self.maxcontext]
886 return number, lines
887 return number, lines
887
888
888 self.header = header
889 self.header = header
889 self.fromline, self.before = trimcontext(fromline, before)
890 self.fromline, self.before = trimcontext(fromline, before)
890 self.toline, self.after = trimcontext(toline, after)
891 self.toline, self.after = trimcontext(toline, after)
891 self.proc = proc
892 self.proc = proc
892 self.hunk = hunk
893 self.hunk = hunk
893 self.added, self.removed = self.countchanges(self.hunk)
894 self.added, self.removed = self.countchanges(self.hunk)
894
895
895 def countchanges(self, hunk):
896 def countchanges(self, hunk):
896 """hunk -> (n+,n-)"""
897 """hunk -> (n+,n-)"""
897 add = len([h for h in hunk if h[0] == '+'])
898 add = len([h for h in hunk if h[0] == '+'])
898 rem = len([h for h in hunk if h[0] == '-'])
899 rem = len([h for h in hunk if h[0] == '-'])
899 return add, rem
900 return add, rem
900
901
901 def write(self, fp):
902 def write(self, fp):
902 delta = len(self.before) + len(self.after)
903 delta = len(self.before) + len(self.after)
903 if self.after and self.after[-1] == '\\ No newline at end of file\n':
904 if self.after and self.after[-1] == '\\ No newline at end of file\n':
904 delta -= 1
905 delta -= 1
905 fromlen = delta + self.removed
906 fromlen = delta + self.removed
906 tolen = delta + self.added
907 tolen = delta + self.added
907 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
908 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
908 (self.fromline, fromlen, self.toline, tolen,
909 (self.fromline, fromlen, self.toline, tolen,
909 self.proc and (' ' + self.proc)))
910 self.proc and (' ' + self.proc)))
910 fp.write(''.join(self.before + self.hunk + self.after))
911 fp.write(''.join(self.before + self.hunk + self.after))
911
912
912 pretty = write
913 pretty = write
913
914
914 def filename(self):
915 def filename(self):
915 return self.header.filename()
916 return self.header.filename()
916
917
917 def __repr__(self):
918 def __repr__(self):
918 return '<hunk %r@%d>' % (self.filename(), self.fromline)
919 return '<hunk %r@%d>' % (self.filename(), self.fromline)
919
920
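# Standalone illustration of the arithmetic behind countchanges and
# recordhunk.write above: added/removed are simple '+'/'-' counts, and the
# @@ lengths are those counts plus the surrounding context lines. The hunk
# body and line numbers are made up for the example.
hunk = ['-old line\n', '+new line one\n', '+new line two\n']
before = [' context above\n']
after = [' context below\n']
added = len([l for l in hunk if l[0] == '+'])     # 2
removed = len([l for l in hunk if l[0] == '-'])   # 1
delta = len(before) + len(after)
fromline, toline = 10, 10
print('@@ -%d,%d +%d,%d @@' % (fromline, delta + removed,
                               toline, delta + added))
# -> @@ -10,3 +10,4 @@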
920 def filterpatch(ui, headers):
921 def filterpatch(ui, headers):
921 """Interactively filter patch chunks into applied-only chunks"""
922 """Interactively filter patch chunks into applied-only chunks"""
922
923
923 def prompt(skipfile, skipall, query, chunk):
924 def prompt(skipfile, skipall, query, chunk):
924 """prompt query, and process base inputs
925 """prompt query, and process base inputs
925
926
926 - y/n for the rest of file
927 - y/n for the rest of file
927 - y/n for the rest
928 - y/n for the rest
928 - ? (help)
929 - ? (help)
929 - q (quit)
930 - q (quit)
930
931
931 Return True/False and possibly updated skipfile and skipall.
932 Return True/False and possibly updated skipfile and skipall.
932 """
933 """
933 newpatches = None
934 newpatches = None
934 if skipall is not None:
935 if skipall is not None:
935 return skipall, skipfile, skipall, newpatches
936 return skipall, skipfile, skipall, newpatches
936 if skipfile is not None:
937 if skipfile is not None:
937 return skipfile, skipfile, skipall, newpatches
938 return skipfile, skipfile, skipall, newpatches
938 while True:
939 while True:
939 resps = _('[Ynesfdaq?]'
940 resps = _('[Ynesfdaq?]'
940 '$$ &Yes, record this change'
941 '$$ &Yes, record this change'
941 '$$ &No, skip this change'
942 '$$ &No, skip this change'
942 '$$ &Edit this change manually'
943 '$$ &Edit this change manually'
943 '$$ &Skip remaining changes to this file'
944 '$$ &Skip remaining changes to this file'
944 '$$ Record remaining changes to this &file'
945 '$$ Record remaining changes to this &file'
945 '$$ &Done, skip remaining changes and files'
946 '$$ &Done, skip remaining changes and files'
946 '$$ Record &all changes to all remaining files'
947 '$$ Record &all changes to all remaining files'
947 '$$ &Quit, recording no changes'
948 '$$ &Quit, recording no changes'
948 '$$ &? (display help)')
949 '$$ &? (display help)')
949 r = ui.promptchoice("%s %s" % (query, resps))
950 r = ui.promptchoice("%s %s" % (query, resps))
950 ui.write("\n")
951 ui.write("\n")
951 if r == 8: # ?
952 if r == 8: # ?
952 for c, t in ui.extractchoices(resps)[1]:
953 for c, t in ui.extractchoices(resps)[1]:
953 ui.write('%s - %s\n' % (c, t.lower()))
954 ui.write('%s - %s\n' % (c, t.lower()))
954 continue
955 continue
955 elif r == 0: # yes
956 elif r == 0: # yes
956 ret = True
957 ret = True
957 elif r == 1: # no
958 elif r == 1: # no
958 ret = False
959 ret = False
959 elif r == 2: # Edit patch
960 elif r == 2: # Edit patch
960 if chunk is None:
961 if chunk is None:
961 ui.write(_('cannot edit patch for whole file'))
962 ui.write(_('cannot edit patch for whole file'))
962 ui.write("\n")
963 ui.write("\n")
963 continue
964 continue
964 if chunk.header.binary():
965 if chunk.header.binary():
965 ui.write(_('cannot edit patch for binary file'))
966 ui.write(_('cannot edit patch for binary file'))
966 ui.write("\n")
967 ui.write("\n")
967 continue
968 continue
968 # Patch comment based on the Git one (based on comment at end of
969 # Patch comment based on the Git one (based on comment at end of
969 # http://mercurial.selenic.com/wiki/RecordExtension)
970 # http://mercurial.selenic.com/wiki/RecordExtension)
970 phelp = '---' + _("""
971 phelp = '---' + _("""
971 To remove '-' lines, make them ' ' lines (context).
972 To remove '-' lines, make them ' ' lines (context).
972 To remove '+' lines, delete them.
973 To remove '+' lines, delete them.
973 Lines starting with # will be removed from the patch.
974 Lines starting with # will be removed from the patch.
974
975
975 If the patch applies cleanly, the edited hunk will immediately be
976 If the patch applies cleanly, the edited hunk will immediately be
976 added to the record list. If it does not apply cleanly, a rejects
977 added to the record list. If it does not apply cleanly, a rejects
977 file will be generated: you can use that when you try again. If
978 file will be generated: you can use that when you try again. If
978 all lines of the hunk are removed, then the edit is aborted and
979 all lines of the hunk are removed, then the edit is aborted and
979 the hunk is left unchanged.
980 the hunk is left unchanged.
980 """)
981 """)
981 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
982 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
982 suffix=".diff", text=True)
983 suffix=".diff", text=True)
983 ncpatchfp = None
984 ncpatchfp = None
984 try:
985 try:
985 # Write the initial patch
986 # Write the initial patch
986 f = os.fdopen(patchfd, "w")
987 f = os.fdopen(patchfd, "w")
987 chunk.header.write(f)
988 chunk.header.write(f)
988 chunk.write(f)
989 chunk.write(f)
989 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
990 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
990 f.close()
991 f.close()
991 # Start the editor and wait for it to complete
992 # Start the editor and wait for it to complete
992 editor = ui.geteditor()
993 editor = ui.geteditor()
993 ui.system("%s \"%s\"" % (editor, patchfn),
994 ui.system("%s \"%s\"" % (editor, patchfn),
994 environ={'HGUSER': ui.username()},
995 environ={'HGUSER': ui.username()},
995 onerr=util.Abort, errprefix=_("edit failed"))
996 onerr=util.Abort, errprefix=_("edit failed"))
996 # Remove comment lines
997 # Remove comment lines
997 patchfp = open(patchfn)
998 patchfp = open(patchfn)
998 ncpatchfp = cStringIO.StringIO()
999 ncpatchfp = cStringIO.StringIO()
999 for line in patchfp:
1000 for line in patchfp:
1000 if not line.startswith('#'):
1001 if not line.startswith('#'):
1001 ncpatchfp.write(line)
1002 ncpatchfp.write(line)
1002 patchfp.close()
1003 patchfp.close()
1003 ncpatchfp.seek(0)
1004 ncpatchfp.seek(0)
1004 newpatches = parsepatch(ncpatchfp)
1005 newpatches = parsepatch(ncpatchfp)
1005 finally:
1006 finally:
1006 os.unlink(patchfn)
1007 os.unlink(patchfn)
1007 del ncpatchfp
1008 del ncpatchfp
1008 # Signal that the chunk shouldn't be applied as-is, but
1009 # Signal that the chunk shouldn't be applied as-is, but
1009 # provide the new patch to be used instead.
1010 # provide the new patch to be used instead.
1010 ret = False
1011 ret = False
1011 elif r == 3: # Skip
1012 elif r == 3: # Skip
1012 ret = skipfile = False
1013 ret = skipfile = False
1013 elif r == 4: # file (Record remaining)
1014 elif r == 4: # file (Record remaining)
1014 ret = skipfile = True
1015 ret = skipfile = True
1015 elif r == 5: # done, skip remaining
1016 elif r == 5: # done, skip remaining
1016 ret = skipall = False
1017 ret = skipall = False
1017 elif r == 6: # all
1018 elif r == 6: # all
1018 ret = skipall = True
1019 ret = skipall = True
1019 elif r == 7: # quit
1020 elif r == 7: # quit
1020 raise util.Abort(_('user quit'))
1021 raise util.Abort(_('user quit'))
1021 return ret, skipfile, skipall, newpatches
1022 return ret, skipfile, skipall, newpatches
1022
1023
1023 seen = set()
1024 seen = set()
1024 applied = {} # 'filename' -> [] of chunks
1025 applied = {} # 'filename' -> [] of chunks
1025 skipfile, skipall = None, None
1026 skipfile, skipall = None, None
1026 pos, total = 1, sum(len(h.hunks) for h in headers)
1027 pos, total = 1, sum(len(h.hunks) for h in headers)
1027 for h in headers:
1028 for h in headers:
1028 pos += len(h.hunks)
1029 pos += len(h.hunks)
1029 skipfile = None
1030 skipfile = None
1030 fixoffset = 0
1031 fixoffset = 0
1031 hdr = ''.join(h.header)
1032 hdr = ''.join(h.header)
1032 if hdr in seen:
1033 if hdr in seen:
1033 continue
1034 continue
1034 seen.add(hdr)
1035 seen.add(hdr)
1035 if skipall is None:
1036 if skipall is None:
1036 h.pretty(ui)
1037 h.pretty(ui)
1037 msg = (_('examine changes to %s?') %
1038 msg = (_('examine changes to %s?') %
1038 _(' and ').join("'%s'" % f for f in h.files()))
1039 _(' and ').join("'%s'" % f for f in h.files()))
1039 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1040 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1040 if not r:
1041 if not r:
1041 continue
1042 continue
1042 applied[h.filename()] = [h]
1043 applied[h.filename()] = [h]
1043 if h.allhunks():
1044 if h.allhunks():
1044 applied[h.filename()] += h.hunks
1045 applied[h.filename()] += h.hunks
1045 continue
1046 continue
1046 for i, chunk in enumerate(h.hunks):
1047 for i, chunk in enumerate(h.hunks):
1047 if skipfile is None and skipall is None:
1048 if skipfile is None and skipall is None:
1048 chunk.pretty(ui)
1049 chunk.pretty(ui)
1049 if total == 1:
1050 if total == 1:
1050 msg = _("record this change to '%s'?") % chunk.filename()
1051 msg = _("record this change to '%s'?") % chunk.filename()
1051 else:
1052 else:
1052 idx = pos - len(h.hunks) + i
1053 idx = pos - len(h.hunks) + i
1053 msg = _("record change %d/%d to '%s'?") % (idx, total,
1054 msg = _("record change %d/%d to '%s'?") % (idx, total,
1054 chunk.filename())
1055 chunk.filename())
1055 r, skipfile, skipall, newpatches = prompt(skipfile,
1056 r, skipfile, skipall, newpatches = prompt(skipfile,
1056 skipall, msg, chunk)
1057 skipall, msg, chunk)
1057 if r:
1058 if r:
1058 if fixoffset:
1059 if fixoffset:
1059 chunk = copy.copy(chunk)
1060 chunk = copy.copy(chunk)
1060 chunk.toline += fixoffset
1061 chunk.toline += fixoffset
1061 applied[chunk.filename()].append(chunk)
1062 applied[chunk.filename()].append(chunk)
1062 elif newpatches is not None:
1063 elif newpatches is not None:
1063 for newpatch in newpatches:
1064 for newpatch in newpatches:
1064 for newhunk in newpatch.hunks:
1065 for newhunk in newpatch.hunks:
1065 if fixoffset:
1066 if fixoffset:
1066 newhunk.toline += fixoffset
1067 newhunk.toline += fixoffset
1067 applied[newhunk.filename()].append(newhunk)
1068 applied[newhunk.filename()].append(newhunk)
1068 else:
1069 else:
1069 fixoffset += chunk.removed - chunk.added
1070 fixoffset += chunk.removed - chunk.added
1070 return sum([h for h in applied.itervalues()
1071 return sum([h for h in applied.itervalues()
1071 if h[0].special() or len(h) > 1], [])
1072 if h[0].special() or len(h) > 1], [])
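# Small demonstration of the flattening in the return statement above:
# 'applied' maps filename -> [header, chunk, chunk, ...], and only files
# that kept at least one chunk (or whose header is 'special', e.g. a
# binary or rename header) survive. The fakeheader class and file names
# are invented for the example.
class fakeheader(object):
    def __init__(self, isspecial):
        self._special = isspecial
    def special(self):
        return self._special

applied = {
    'kept.txt':    [fakeheader(False), 'chunk1', 'chunk2'],
    'skipped.txt': [fakeheader(False)],   # header only -> dropped
    'binary.bin':  [fakeheader(True)],    # special -> kept
}
flat = sum([h for h in applied.values()
            if h[0].special() or len(h) > 1], [])
print(len(flat))
# -> 4 (header plus two chunks for kept.txt, plus the special header)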
1072 class hunk(object):
1073 class hunk(object):
1073 def __init__(self, desc, num, lr, context):
1074 def __init__(self, desc, num, lr, context):
1074 self.number = num
1075 self.number = num
1075 self.desc = desc
1076 self.desc = desc
1076 self.hunk = [desc]
1077 self.hunk = [desc]
1077 self.a = []
1078 self.a = []
1078 self.b = []
1079 self.b = []
1079 self.starta = self.lena = None
1080 self.starta = self.lena = None
1080 self.startb = self.lenb = None
1081 self.startb = self.lenb = None
1081 if lr is not None:
1082 if lr is not None:
1082 if context:
1083 if context:
1083 self.read_context_hunk(lr)
1084 self.read_context_hunk(lr)
1084 else:
1085 else:
1085 self.read_unified_hunk(lr)
1086 self.read_unified_hunk(lr)
1086
1087
1087 def getnormalized(self):
1088 def getnormalized(self):
1088 """Return a copy with line endings normalized to LF."""
1089 """Return a copy with line endings normalized to LF."""
1089
1090
1090 def normalize(lines):
1091 def normalize(lines):
1091 nlines = []
1092 nlines = []
1092 for line in lines:
1093 for line in lines:
1093 if line.endswith('\r\n'):
1094 if line.endswith('\r\n'):
1094 line = line[:-2] + '\n'
1095 line = line[:-2] + '\n'
1095 nlines.append(line)
1096 nlines.append(line)
1096 return nlines
1097 return nlines
1097
1098
1098 # Dummy object, it is rebuilt manually
1099 # Dummy object, it is rebuilt manually
1099 nh = hunk(self.desc, self.number, None, None)
1100 nh = hunk(self.desc, self.number, None, None)
1100 nh.number = self.number
1101 nh.number = self.number
1101 nh.desc = self.desc
1102 nh.desc = self.desc
1102 nh.hunk = self.hunk
1103 nh.hunk = self.hunk
1103 nh.a = normalize(self.a)
1104 nh.a = normalize(self.a)
1104 nh.b = normalize(self.b)
1105 nh.b = normalize(self.b)
1105 nh.starta = self.starta
1106 nh.starta = self.starta
1106 nh.startb = self.startb
1107 nh.startb = self.startb
1107 nh.lena = self.lena
1108 nh.lena = self.lena
1108 nh.lenb = self.lenb
1109 nh.lenb = self.lenb
1109 return nh
1110 return nh
1110
1111
1111 def read_unified_hunk(self, lr):
1112 def read_unified_hunk(self, lr):
1112 m = unidesc.match(self.desc)
1113 m = unidesc.match(self.desc)
1113 if not m:
1114 if not m:
1114 raise PatchError(_("bad hunk #%d") % self.number)
1115 raise PatchError(_("bad hunk #%d") % self.number)
1115 self.starta, self.lena, self.startb, self.lenb = m.groups()
1116 self.starta, self.lena, self.startb, self.lenb = m.groups()
1116 if self.lena is None:
1117 if self.lena is None:
1117 self.lena = 1
1118 self.lena = 1
1118 else:
1119 else:
1119 self.lena = int(self.lena)
1120 self.lena = int(self.lena)
1120 if self.lenb is None:
1121 if self.lenb is None:
1121 self.lenb = 1
1122 self.lenb = 1
1122 else:
1123 else:
1123 self.lenb = int(self.lenb)
1124 self.lenb = int(self.lenb)
1124 self.starta = int(self.starta)
1125 self.starta = int(self.starta)
1125 self.startb = int(self.startb)
1126 self.startb = int(self.startb)
1126 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1127 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1127 self.b)
1128 self.b)
1128 # if we hit EOF before finishing out the hunk, the last line will
1129 # if we hit EOF before finishing out the hunk, the last line will
1129 # be zero length. Let's try to fix it up.
1130 # be zero length. Let's try to fix it up.
1130 while len(self.hunk[-1]) == 0:
1131 while len(self.hunk[-1]) == 0:
1131 del self.hunk[-1]
1132 del self.hunk[-1]
1132 del self.a[-1]
1133 del self.a[-1]
1133 del self.b[-1]
1134 del self.b[-1]
1134 self.lena -= 1
1135 self.lena -= 1
1135 self.lenb -= 1
1136 self.lenb -= 1
1136 self._fixnewline(lr)
1137 self._fixnewline(lr)
1137
1138
1138 def read_context_hunk(self, lr):
1139 def read_context_hunk(self, lr):
1139 self.desc = lr.readline()
1140 self.desc = lr.readline()
1140 m = contextdesc.match(self.desc)
1141 m = contextdesc.match(self.desc)
1141 if not m:
1142 if not m:
1142 raise PatchError(_("bad hunk #%d") % self.number)
1143 raise PatchError(_("bad hunk #%d") % self.number)
1143 self.starta, aend = m.groups()
1144 self.starta, aend = m.groups()
1144 self.starta = int(self.starta)
1145 self.starta = int(self.starta)
1145 if aend is None:
1146 if aend is None:
1146 aend = self.starta
1147 aend = self.starta
1147 self.lena = int(aend) - self.starta
1148 self.lena = int(aend) - self.starta
1148 if self.starta:
1149 if self.starta:
1149 self.lena += 1
1150 self.lena += 1
1150 for x in xrange(self.lena):
1151 for x in xrange(self.lena):
1151 l = lr.readline()
1152 l = lr.readline()
1152 if l.startswith('---'):
1153 if l.startswith('---'):
1153 # lines addition, old block is empty
1154 # lines addition, old block is empty
1154 lr.push(l)
1155 lr.push(l)
1155 break
1156 break
1156 s = l[2:]
1157 s = l[2:]
1157 if l.startswith('- ') or l.startswith('! '):
1158 if l.startswith('- ') or l.startswith('! '):
1158 u = '-' + s
1159 u = '-' + s
1159 elif l.startswith(' '):
1160 elif l.startswith(' '):
1160 u = ' ' + s
1161 u = ' ' + s
1161 else:
1162 else:
1162 raise PatchError(_("bad hunk #%d old text line %d") %
1163 raise PatchError(_("bad hunk #%d old text line %d") %
1163 (self.number, x))
1164 (self.number, x))
1164 self.a.append(u)
1165 self.a.append(u)
1165 self.hunk.append(u)
1166 self.hunk.append(u)
1166
1167
1167 l = lr.readline()
1168 l = lr.readline()
1168 if l.startswith('\ '):
1169 if l.startswith('\ '):
1169 s = self.a[-1][:-1]
1170 s = self.a[-1][:-1]
1170 self.a[-1] = s
1171 self.a[-1] = s
1171 self.hunk[-1] = s
1172 self.hunk[-1] = s
1172 l = lr.readline()
1173 l = lr.readline()
1173 m = contextdesc.match(l)
1174 m = contextdesc.match(l)
1174 if not m:
1175 if not m:
1175 raise PatchError(_("bad hunk #%d") % self.number)
1176 raise PatchError(_("bad hunk #%d") % self.number)
1176 self.startb, bend = m.groups()
1177 self.startb, bend = m.groups()
1177 self.startb = int(self.startb)
1178 self.startb = int(self.startb)
1178 if bend is None:
1179 if bend is None:
1179 bend = self.startb
1180 bend = self.startb
1180 self.lenb = int(bend) - self.startb
1181 self.lenb = int(bend) - self.startb
1181 if self.startb:
1182 if self.startb:
1182 self.lenb += 1
1183 self.lenb += 1
1183 hunki = 1
1184 hunki = 1
1184 for x in xrange(self.lenb):
1185 for x in xrange(self.lenb):
1185 l = lr.readline()
1186 l = lr.readline()
1186 if l.startswith('\ '):
1187 if l.startswith('\ '):
1187 # XXX: the only way to hit this is with an invalid line range.
1188 # XXX: the only way to hit this is with an invalid line range.
1188 # The no-eol marker is not counted in the line range, but I
1189 # The no-eol marker is not counted in the line range, but I
1189 # guess there are diff(1) implementations out there which behave differently.
1190 # guess there are diff(1) implementations out there which behave differently.
1190 s = self.b[-1][:-1]
1191 s = self.b[-1][:-1]
1191 self.b[-1] = s
1192 self.b[-1] = s
1192 self.hunk[hunki - 1] = s
1193 self.hunk[hunki - 1] = s
1193 continue
1194 continue
1194 if not l:
1195 if not l:
1195 # line deletions, new block is empty and we hit EOF
1196 # line deletions, new block is empty and we hit EOF
1196 lr.push(l)
1197 lr.push(l)
1197 break
1198 break
1198 s = l[2:]
1199 s = l[2:]
1199 if l.startswith('+ ') or l.startswith('! '):
1200 if l.startswith('+ ') or l.startswith('! '):
1200 u = '+' + s
1201 u = '+' + s
1201 elif l.startswith(' '):
1202 elif l.startswith(' '):
1202 u = ' ' + s
1203 u = ' ' + s
1203 elif len(self.b) == 0:
1204 elif len(self.b) == 0:
1204 # line deletions, new block is empty
1205 # line deletions, new block is empty
1205 lr.push(l)
1206 lr.push(l)
1206 break
1207 break
1207 else:
1208 else:
1208 raise PatchError(_("bad hunk #%d old text line %d") %
1209 raise PatchError(_("bad hunk #%d old text line %d") %
1209 (self.number, x))
1210 (self.number, x))
1210 self.b.append(s)
1211 self.b.append(s)
1211 while True:
1212 while True:
1212 if hunki >= len(self.hunk):
1213 if hunki >= len(self.hunk):
1213 h = ""
1214 h = ""
1214 else:
1215 else:
1215 h = self.hunk[hunki]
1216 h = self.hunk[hunki]
1216 hunki += 1
1217 hunki += 1
1217 if h == u:
1218 if h == u:
1218 break
1219 break
1219 elif h.startswith('-'):
1220 elif h.startswith('-'):
1220 continue
1221 continue
1221 else:
1222 else:
1222 self.hunk.insert(hunki - 1, u)
1223 self.hunk.insert(hunki - 1, u)
1223 break
1224 break
1224
1225
1225 if not self.a:
1226 if not self.a:
1226 # this happens when lines were only added to the hunk
1227 # this happens when lines were only added to the hunk
1227 for x in self.hunk:
1228 for x in self.hunk:
1228 if x.startswith('-') or x.startswith(' '):
1229 if x.startswith('-') or x.startswith(' '):
1229 self.a.append(x)
1230 self.a.append(x)
1230 if not self.b:
1231 if not self.b:
1231 # this happens when lines were only deleted from the hunk
1232 # this happens when lines were only deleted from the hunk
1232 for x in self.hunk:
1233 for x in self.hunk:
1233 if x.startswith('+') or x.startswith(' '):
1234 if x.startswith('+') or x.startswith(' '):
1234 self.b.append(x[1:])
1235 self.b.append(x[1:])
1235 # @@ -start,len +start,len @@
1236 # @@ -start,len +start,len @@
1236 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1237 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1237 self.startb, self.lenb)
1238 self.startb, self.lenb)
1238 self.hunk[0] = self.desc
1239 self.hunk[0] = self.desc
1239 self._fixnewline(lr)
1240 self._fixnewline(lr)
1240
1241
1241 def _fixnewline(self, lr):
1242 def _fixnewline(self, lr):
1242 l = lr.readline()
1243 l = lr.readline()
1243 if l.startswith('\ '):
1244 if l.startswith('\ '):
1244 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1245 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1245 else:
1246 else:
1246 lr.push(l)
1247 lr.push(l)
1247
1248
1248 def complete(self):
1249 def complete(self):
1249 return len(self.a) == self.lena and len(self.b) == self.lenb
1250 return len(self.a) == self.lena and len(self.b) == self.lenb
1250
1251
1251 def _fuzzit(self, old, new, fuzz, toponly):
1252 def _fuzzit(self, old, new, fuzz, toponly):
1252 # this removes context lines from the top and bottom of the old and new
1253 # this removes context lines from the top and bottom of the old and new
1253 # line lists. It checks the hunk to make sure only context lines are
1254 # line lists. It checks the hunk to make sure only context lines are
1254 # removed, and then returns new, shortened lists of lines.
1255 # removed, and then returns new, shortened lists of lines.
1255 fuzz = min(fuzz, len(old))
1256 fuzz = min(fuzz, len(old))
1256 if fuzz:
1257 if fuzz:
1257 top = 0
1258 top = 0
1258 bot = 0
1259 bot = 0
1259 hlen = len(self.hunk)
1260 hlen = len(self.hunk)
1260 for x in xrange(hlen - 1):
1261 for x in xrange(hlen - 1):
1261 # the hunk starts with the @@ line, so use x+1
1262 # the hunk starts with the @@ line, so use x+1
1262 if self.hunk[x + 1][0] == ' ':
1263 if self.hunk[x + 1][0] == ' ':
1263 top += 1
1264 top += 1
1264 else:
1265 else:
1265 break
1266 break
1266 if not toponly:
1267 if not toponly:
1267 for x in xrange(hlen - 1):
1268 for x in xrange(hlen - 1):
1268 if self.hunk[hlen - bot - 1][0] == ' ':
1269 if self.hunk[hlen - bot - 1][0] == ' ':
1269 bot += 1
1270 bot += 1
1270 else:
1271 else:
1271 break
1272 break
1272
1273
1273 bot = min(fuzz, bot)
1274 bot = min(fuzz, bot)
1274 top = min(fuzz, top)
1275 top = min(fuzz, top)
1275 return old[top:len(old) - bot], new[top:len(new) - bot], top
1276 return old[top:len(old) - bot], new[top:len(new) - bot], top
1276 return old, new, 0
1277 return old, new, 0
1277
1278
1278 def fuzzit(self, fuzz, toponly):
1279 def fuzzit(self, fuzz, toponly):
1279 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1280 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1280 oldstart = self.starta + top
1281 oldstart = self.starta + top
1281 newstart = self.startb + top
1282 newstart = self.startb + top
1282 # zero length hunk ranges already have their start decremented
1283 # zero length hunk ranges already have their start decremented
1283 if self.lena and oldstart > 0:
1284 if self.lena and oldstart > 0:
1284 oldstart -= 1
1285 oldstart -= 1
1285 if self.lenb and newstart > 0:
1286 if self.lenb and newstart > 0:
1286 newstart -= 1
1287 newstart -= 1
1287 return old, oldstart, new, newstart
1288 return old, oldstart, new, newstart
1288
1289
1289 class binhunk(object):
1290 class binhunk(object):
1290 'A binary patch file.'
1291 'A binary patch file.'
1291 def __init__(self, lr, fname):
1292 def __init__(self, lr, fname):
1292 self.text = None
1293 self.text = None
1293 self.delta = False
1294 self.delta = False
1294 self.hunk = ['GIT binary patch\n']
1295 self.hunk = ['GIT binary patch\n']
1295 self._fname = fname
1296 self._fname = fname
1296 self._read(lr)
1297 self._read(lr)
1297
1298
1298 def complete(self):
1299 def complete(self):
1299 return self.text is not None
1300 return self.text is not None
1300
1301
1301 def new(self, lines):
1302 def new(self, lines):
1302 if self.delta:
1303 if self.delta:
1303 return [applybindelta(self.text, ''.join(lines))]
1304 return [applybindelta(self.text, ''.join(lines))]
1304 return [self.text]
1305 return [self.text]
1305
1306
1306 def _read(self, lr):
1307 def _read(self, lr):
1307 def getline(lr, hunk):
1308 def getline(lr, hunk):
1308 l = lr.readline()
1309 l = lr.readline()
1309 hunk.append(l)
1310 hunk.append(l)
1310 return l.rstrip('\r\n')
1311 return l.rstrip('\r\n')
1311
1312
1312 size = 0
1313 size = 0
1313 while True:
1314 while True:
1314 line = getline(lr, self.hunk)
1315 line = getline(lr, self.hunk)
1315 if not line:
1316 if not line:
1316 raise PatchError(_('could not extract "%s" binary data')
1317 raise PatchError(_('could not extract "%s" binary data')
1317 % self._fname)
1318 % self._fname)
1318 if line.startswith('literal '):
1319 if line.startswith('literal '):
1319 size = int(line[8:].rstrip())
1320 size = int(line[8:].rstrip())
1320 break
1321 break
1321 if line.startswith('delta '):
1322 if line.startswith('delta '):
1322 size = int(line[6:].rstrip())
1323 size = int(line[6:].rstrip())
1323 self.delta = True
1324 self.delta = True
1324 break
1325 break
1325 dec = []
1326 dec = []
1326 line = getline(lr, self.hunk)
1327 line = getline(lr, self.hunk)
1327 while len(line) > 1:
1328 while len(line) > 1:
1328 l = line[0]
1329 l = line[0]
1329 if l <= 'Z' and l >= 'A':
1330 if l <= 'Z' and l >= 'A':
1330 l = ord(l) - ord('A') + 1
1331 l = ord(l) - ord('A') + 1
1331 else:
1332 else:
1332 l = ord(l) - ord('a') + 27
1333 l = ord(l) - ord('a') + 27
1333 try:
1334 try:
1334 dec.append(base85.b85decode(line[1:])[:l])
1335 dec.append(base85.b85decode(line[1:])[:l])
1335 except ValueError, e:
1336 except ValueError, e:
1336 raise PatchError(_('could not decode "%s" binary patch: %s')
1337 raise PatchError(_('could not decode "%s" binary patch: %s')
1337 % (self._fname, str(e)))
1338 % (self._fname, str(e)))
1338 line = getline(lr, self.hunk)
1339 line = getline(lr, self.hunk)
1339 text = zlib.decompress(''.join(dec))
1340 text = zlib.decompress(''.join(dec))
1340 if len(text) != size:
1341 if len(text) != size:
1341 raise PatchError(_('"%s" length is %d bytes, should be %d')
1342 raise PatchError(_('"%s" length is %d bytes, should be %d')
1342 % (self._fname, len(text), size))
1343 % (self._fname, len(text), size))
1343 self.text = text
1344 self.text = text
1344
1345
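# Illustration of the per-line length prefix handled in binhunk._read
# above: in a GIT binary patch each base85 line starts with a character
# giving the decoded byte count, 'A'..'Z' meaning 1..26 and 'a'..'z'
# meaning 27..52. The sample characters are arbitrary.
def _linelen(c):
    if 'A' <= c <= 'Z':
        return ord(c) - ord('A') + 1
    return ord(c) - ord('a') + 27

print('%d %d %d %d' % (_linelen('A'), _linelen('Z'),
                       _linelen('a'), _linelen('z')))
# -> 1 26 27 52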
1345 def parsefilename(str):
1346 def parsefilename(str):
1346 # --- filename \t|space stuff
1347 # --- filename \t|space stuff
1347 s = str[4:].rstrip('\r\n')
1348 s = str[4:].rstrip('\r\n')
1348 i = s.find('\t')
1349 i = s.find('\t')
1349 if i < 0:
1350 if i < 0:
1350 i = s.find(' ')
1351 i = s.find(' ')
1351 if i < 0:
1352 if i < 0:
1352 return s
1353 return s
1353 return s[:i]
1354 return s[:i]
1354
1355
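# Standalone rerun of the trimming parsefilename performs: drop the leading
# '--- '/'+++ ' marker and cut at the first tab or space. The sample lines
# are invented for the example.
def _parsefilename(s):
    s = s[4:].rstrip('\r\n')
    i = s.find('\t')
    if i < 0:
        i = s.find(' ')
    if i < 0:
        return s
    return s[:i]

print(_parsefilename('--- a/foo.c\tMon Jan 01 00:00:00 2024\n'))  # a/foo.c
print(_parsefilename('+++ b/foo.c\n'))                            # b/foo.c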
1355 def parsepatch(fp):
1356 def parsepatch(originalchunks):
1356 """patch -> [] of headers -> [] of hunks """
1357 """patch -> [] of headers -> [] of hunks """
1357 class parser(object):
1358 class parser(object):
1358 """patch parsing state machine"""
1359 """patch parsing state machine"""
1359 def __init__(self):
1360 def __init__(self):
1360 self.fromline = 0
1361 self.fromline = 0
1361 self.toline = 0
1362 self.toline = 0
1362 self.proc = ''
1363 self.proc = ''
1363 self.header = None
1364 self.header = None
1364 self.context = []
1365 self.context = []
1365 self.before = []
1366 self.before = []
1366 self.hunk = []
1367 self.hunk = []
1367 self.headers = []
1368 self.headers = []
1368
1369
1369 def addrange(self, limits):
1370 def addrange(self, limits):
1370 fromstart, fromend, tostart, toend, proc = limits
1371 fromstart, fromend, tostart, toend, proc = limits
1371 self.fromline = int(fromstart)
1372 self.fromline = int(fromstart)
1372 self.toline = int(tostart)
1373 self.toline = int(tostart)
1373 self.proc = proc
1374 self.proc = proc
1374
1375
1375 def addcontext(self, context):
1376 def addcontext(self, context):
1376 if self.hunk:
1377 if self.hunk:
1377 h = recordhunk(self.header, self.fromline, self.toline,
1378 h = recordhunk(self.header, self.fromline, self.toline,
1378 self.proc, self.before, self.hunk, context)
1379 self.proc, self.before, self.hunk, context)
1379 self.header.hunks.append(h)
1380 self.header.hunks.append(h)
1380 self.fromline += len(self.before) + h.removed
1381 self.fromline += len(self.before) + h.removed
1381 self.toline += len(self.before) + h.added
1382 self.toline += len(self.before) + h.added
1382 self.before = []
1383 self.before = []
1383 self.hunk = []
1384 self.hunk = []
1384 self.proc = ''
1385 self.proc = ''
1385 self.context = context
1386 self.context = context
1386
1387
1387 def addhunk(self, hunk):
1388 def addhunk(self, hunk):
1388 if self.context:
1389 if self.context:
1389 self.before = self.context
1390 self.before = self.context
1390 self.context = []
1391 self.context = []
1391 self.hunk = hunk
1392 self.hunk = hunk
1392
1393
1393 def newfile(self, hdr):
1394 def newfile(self, hdr):
1394 self.addcontext([])
1395 self.addcontext([])
1395 h = header(hdr)
1396 h = header(hdr)
1396 self.headers.append(h)
1397 self.headers.append(h)
1397 self.header = h
1398 self.header = h
1398
1399
1399 def addother(self, line):
1400 def addother(self, line):
1400 pass # 'other' lines are ignored
1401 pass # 'other' lines are ignored
1401
1402
1402 def finished(self):
1403 def finished(self):
1403 self.addcontext([])
1404 self.addcontext([])
1404 return self.headers
1405 return self.headers
1405
1406
1406 transitions = {
1407 transitions = {
1407 'file': {'context': addcontext,
1408 'file': {'context': addcontext,
1408 'file': newfile,
1409 'file': newfile,
1409 'hunk': addhunk,
1410 'hunk': addhunk,
1410 'range': addrange},
1411 'range': addrange},
1411 'context': {'file': newfile,
1412 'context': {'file': newfile,
1412 'hunk': addhunk,
1413 'hunk': addhunk,
1413 'range': addrange,
1414 'range': addrange,
1414 'other': addother},
1415 'other': addother},
1415 'hunk': {'context': addcontext,
1416 'hunk': {'context': addcontext,
1416 'file': newfile,
1417 'file': newfile,
1417 'range': addrange},
1418 'range': addrange},
1418 'range': {'context': addcontext,
1419 'range': {'context': addcontext,
1419 'hunk': addhunk},
1420 'hunk': addhunk},
1420 'other': {'other': addother},
1421 'other': {'other': addother},
1421 }
1422 }
1422
1423
1423 p = parser()
1424 p = parser()
1425 fp = cStringIO.StringIO()
1426 fp.write(''.join(originalchunks))
1427 fp.seek(0)
1424
1428
1425 state = 'context'
1429 state = 'context'
1426 for newstate, data in scanpatch(fp):
1430 for newstate, data in scanpatch(fp):
1427 try:
1431 try:
1428 p.transitions[state][newstate](p, data)
1432 p.transitions[state][newstate](p, data)
1429 except KeyError:
1433 except KeyError:
1430 raise PatchError('unhandled transition: %s -> %s' %
1434 raise PatchError('unhandled transition: %s -> %s' %
1431 (state, newstate))
1435 (state, newstate))
1432 state = newstate
1436 state = newstate
1437 del fp
1433 return p.finished()
1438 return p.finished()
1434
1439
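# Hedged usage sketch, not part of the changeset: parsepatch() now takes the
# list of diff chunks a caller already holds (e.g. recordfilter()'s
# originalhunks) instead of a file object; it still returns the 'header'
# objects (with their 'recordhunk' lists) defined earlier in this module.
# The variable names below are illustrative only.
example_chunks = ['diff -r 000000000000 -r 111111111111 foo\n',
                  '--- a/foo\n',
                  '+++ b/foo\n',
                  '@@ -1,1 +1,1 @@\n',
                  '-old\n',
                  '+new\n']
example_headers = parsepatch(example_chunks)
assert len(example_headers) == 1           # one file header ...
assert len(example_headers[0].hunks) == 1  # ... carrying a single hunk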
1435 def pathtransform(path, strip, prefix):
1440 def pathtransform(path, strip, prefix):
1436 '''turn a path from a patch into a path suitable for the repository
1441 '''turn a path from a patch into a path suitable for the repository
1437
1442
1438 prefix, if not empty, is expected to be normalized with a / at the end.
1443 prefix, if not empty, is expected to be normalized with a / at the end.
1439
1444
1440 Returns (stripped components, path in repository).
1445 Returns (stripped components, path in repository).
1441
1446
1442 >>> pathtransform('a/b/c', 0, '')
1447 >>> pathtransform('a/b/c', 0, '')
1443 ('', 'a/b/c')
1448 ('', 'a/b/c')
1444 >>> pathtransform(' a/b/c ', 0, '')
1449 >>> pathtransform(' a/b/c ', 0, '')
1445 ('', ' a/b/c')
1450 ('', ' a/b/c')
1446 >>> pathtransform(' a/b/c ', 2, '')
1451 >>> pathtransform(' a/b/c ', 2, '')
1447 ('a/b/', 'c')
1452 ('a/b/', 'c')
1448 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1453 >>> pathtransform(' a//b/c ', 2, 'd/e/')
1449 ('a//b/', 'd/e/c')
1454 ('a//b/', 'd/e/c')
1450 >>> pathtransform('a/b/c', 3, '')
1455 >>> pathtransform('a/b/c', 3, '')
1451 Traceback (most recent call last):
1456 Traceback (most recent call last):
1452 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1457 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1453 '''
1458 '''
1454 pathlen = len(path)
1459 pathlen = len(path)
1455 i = 0
1460 i = 0
1456 if strip == 0:
1461 if strip == 0:
1457 return '', path.rstrip()
1462 return '', path.rstrip()
1458 count = strip
1463 count = strip
1459 while count > 0:
1464 while count > 0:
1460 i = path.find('/', i)
1465 i = path.find('/', i)
1461 if i == -1:
1466 if i == -1:
1462 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1467 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1463 (count, strip, path))
1468 (count, strip, path))
1464 i += 1
1469 i += 1
1465 # consume '//' in the path
1470 # consume '//' in the path
1466 while i < pathlen - 1 and path[i] == '/':
1471 while i < pathlen - 1 and path[i] == '/':
1467 i += 1
1472 i += 1
1468 count -= 1
1473 count -= 1
1469 return path[:i].lstrip(), prefix + path[i:].rstrip()
1474 return path[:i].lstrip(), prefix + path[i:].rstrip()
1470
1475
1471 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1476 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1472 nulla = afile_orig == "/dev/null"
1477 nulla = afile_orig == "/dev/null"
1473 nullb = bfile_orig == "/dev/null"
1478 nullb = bfile_orig == "/dev/null"
1474 create = nulla and hunk.starta == 0 and hunk.lena == 0
1479 create = nulla and hunk.starta == 0 and hunk.lena == 0
1475 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1480 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1476 abase, afile = pathtransform(afile_orig, strip, prefix)
1481 abase, afile = pathtransform(afile_orig, strip, prefix)
1477 gooda = not nulla and backend.exists(afile)
1482 gooda = not nulla and backend.exists(afile)
1478 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1483 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1479 if afile == bfile:
1484 if afile == bfile:
1480 goodb = gooda
1485 goodb = gooda
1481 else:
1486 else:
1482 goodb = not nullb and backend.exists(bfile)
1487 goodb = not nullb and backend.exists(bfile)
1483 missing = not goodb and not gooda and not create
1488 missing = not goodb and not gooda and not create
1484
1489
1485 # some diff programs apparently produce patches where the afile is
1490 # some diff programs apparently produce patches where the afile is
1486 # not /dev/null, but afile starts with bfile
1491 # not /dev/null, but afile starts with bfile
1487 abasedir = afile[:afile.rfind('/') + 1]
1492 abasedir = afile[:afile.rfind('/') + 1]
1488 bbasedir = bfile[:bfile.rfind('/') + 1]
1493 bbasedir = bfile[:bfile.rfind('/') + 1]
1489 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1494 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1490 and hunk.starta == 0 and hunk.lena == 0):
1495 and hunk.starta == 0 and hunk.lena == 0):
1491 create = True
1496 create = True
1492 missing = False
1497 missing = False
1493
1498
1494 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1499 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1495 # diff is between a file and its backup. In this case, the original
1500 # diff is between a file and its backup. In this case, the original
1496 # file should be patched (see original mpatch code).
1501 # file should be patched (see original mpatch code).
1497 isbackup = (abase == bbase and bfile.startswith(afile))
1502 isbackup = (abase == bbase and bfile.startswith(afile))
1498 fname = None
1503 fname = None
1499 if not missing:
1504 if not missing:
1500 if gooda and goodb:
1505 if gooda and goodb:
1501 if isbackup:
1506 if isbackup:
1502 fname = afile
1507 fname = afile
1503 else:
1508 else:
1504 fname = bfile
1509 fname = bfile
1505 elif gooda:
1510 elif gooda:
1506 fname = afile
1511 fname = afile
1507
1512
1508 if not fname:
1513 if not fname:
1509 if not nullb:
1514 if not nullb:
1510 if isbackup:
1515 if isbackup:
1511 fname = afile
1516 fname = afile
1512 else:
1517 else:
1513 fname = bfile
1518 fname = bfile
1514 elif not nulla:
1519 elif not nulla:
1515 fname = afile
1520 fname = afile
1516 else:
1521 else:
1517 raise PatchError(_("undefined source and destination files"))
1522 raise PatchError(_("undefined source and destination files"))
1518
1523
1519 gp = patchmeta(fname)
1524 gp = patchmeta(fname)
1520 if create:
1525 if create:
1521 gp.op = 'ADD'
1526 gp.op = 'ADD'
1522 elif remove:
1527 elif remove:
1523 gp.op = 'DELETE'
1528 gp.op = 'DELETE'
1524 return gp
1529 return gp
1525
1530
1526 def scanpatch(fp):
1531 def scanpatch(fp):
1527 """like patch.iterhunks, but yield different events
1532 """like patch.iterhunks, but yield different events
1528
1533
1529 - ('file', [header_lines + fromfile + tofile])
1534 - ('file', [header_lines + fromfile + tofile])
1530 - ('context', [context_lines])
1535 - ('context', [context_lines])
1531 - ('hunk', [hunk_lines])
1536 - ('hunk', [hunk_lines])
1532 - ('range', (-start,len, +start,len, proc))
1537 - ('range', (-start,len, +start,len, proc))
1533 """
1538 """
1534 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1539 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1535 lr = linereader(fp)
1540 lr = linereader(fp)
1536
1541
1537 def scanwhile(first, p):
1542 def scanwhile(first, p):
1538 """scan lr while predicate holds"""
1543 """scan lr while predicate holds"""
1539 lines = [first]
1544 lines = [first]
1540 while True:
1545 while True:
1541 line = lr.readline()
1546 line = lr.readline()
1542 if not line:
1547 if not line:
1543 break
1548 break
1544 if p(line):
1549 if p(line):
1545 lines.append(line)
1550 lines.append(line)
1546 else:
1551 else:
1547 lr.push(line)
1552 lr.push(line)
1548 break
1553 break
1549 return lines
1554 return lines
1550
1555
1551 while True:
1556 while True:
1552 line = lr.readline()
1557 line = lr.readline()
1553 if not line:
1558 if not line:
1554 break
1559 break
1555 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1560 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1556 def notheader(line):
1561 def notheader(line):
1557 s = line.split(None, 1)
1562 s = line.split(None, 1)
1558 return not s or s[0] not in ('---', 'diff')
1563 return not s or s[0] not in ('---', 'diff')
1559 header = scanwhile(line, notheader)
1564 header = scanwhile(line, notheader)
1560 fromfile = lr.readline()
1565 fromfile = lr.readline()
1561 if fromfile.startswith('---'):
1566 if fromfile.startswith('---'):
1562 tofile = lr.readline()
1567 tofile = lr.readline()
1563 header += [fromfile, tofile]
1568 header += [fromfile, tofile]
1564 else:
1569 else:
1565 lr.push(fromfile)
1570 lr.push(fromfile)
1566 yield 'file', header
1571 yield 'file', header
1567 elif line[0] == ' ':
1572 elif line[0] == ' ':
1568 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1573 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1569 elif line[0] in '-+':
1574 elif line[0] in '-+':
1570 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1575 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1571 else:
1576 else:
1572 m = lines_re.match(line)
1577 m = lines_re.match(line)
1573 if m:
1578 if m:
1574 yield 'range', m.groups()
1579 yield 'range', m.groups()
1575 else:
1580 else:
1576 yield 'other', line
1581 yield 'other', line
1577
1582
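# Hedged sketch, not part of the changeset: the event stream scanpatch()
# yields for a minimal unified diff. cStringIO is imported at the top of this
# module; the file-like wrapping mirrors what parsepatch() does above.
example_fp = cStringIO.StringIO('diff -r 000000000000 -r 111111111111 foo\n'
                                '--- a/foo\n'
                                '+++ b/foo\n'
                                '@@ -1,1 +1,1 @@\n'
                                '-old\n'
                                '+new\n')
assert [ev for ev, data in scanpatch(example_fp)] == ['file', 'range', 'hunk']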
1578 def scangitpatch(lr, firstline):
1583 def scangitpatch(lr, firstline):
1579 """
1584 """
1580 Git patches can emit:
1585 Git patches can emit:
1581 - rename a to b
1586 - rename a to b
1582 - change b
1587 - change b
1583 - copy a to c
1588 - copy a to c
1584 - change c
1589 - change c
1585
1590
1586 We cannot apply this sequence as-is: the renamed 'a' could not be
1591 We cannot apply this sequence as-is: the renamed 'a' could not be
1587 found, because it would have been renamed already. And we cannot copy
1592 found, because it would have been renamed already. And we cannot copy
1588 from 'b' instead because 'b' would have been changed already. So
1593 from 'b' instead because 'b' would have been changed already. So
1589 we scan the git patch for copy and rename commands so we can
1594 we scan the git patch for copy and rename commands so we can
1590 perform the copies ahead of time.
1595 perform the copies ahead of time.
1591 """
1596 """
1592 pos = 0
1597 pos = 0
1593 try:
1598 try:
1594 pos = lr.fp.tell()
1599 pos = lr.fp.tell()
1595 fp = lr.fp
1600 fp = lr.fp
1596 except IOError:
1601 except IOError:
1597 fp = cStringIO.StringIO(lr.fp.read())
1602 fp = cStringIO.StringIO(lr.fp.read())
1598 gitlr = linereader(fp)
1603 gitlr = linereader(fp)
1599 gitlr.push(firstline)
1604 gitlr.push(firstline)
1600 gitpatches = readgitpatch(gitlr)
1605 gitpatches = readgitpatch(gitlr)
1601 fp.seek(pos)
1606 fp.seek(pos)
1602 return gitpatches
1607 return gitpatches
1603
1608
1604 def iterhunks(fp):
1609 def iterhunks(fp):
1605 """Read a patch and yield the following events:
1610 """Read a patch and yield the following events:
1606 - ("file", afile, bfile, firsthunk): select a new target file.
1611 - ("file", afile, bfile, firsthunk): select a new target file.
1607 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1612 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1608 "file" event.
1613 "file" event.
1609 - ("git", gitchanges): current diff is in git format, gitchanges
1614 - ("git", gitchanges): current diff is in git format, gitchanges
1610 maps filenames to gitpatch records. Unique event.
1615 maps filenames to gitpatch records. Unique event.
1611 """
1616 """
1612 afile = ""
1617 afile = ""
1613 bfile = ""
1618 bfile = ""
1614 state = None
1619 state = None
1615 hunknum = 0
1620 hunknum = 0
1616 emitfile = newfile = False
1621 emitfile = newfile = False
1617 gitpatches = None
1622 gitpatches = None
1618
1623
1619 # our states
1624 # our states
1620 BFILE = 1
1625 BFILE = 1
1621 context = None
1626 context = None
1622 lr = linereader(fp)
1627 lr = linereader(fp)
1623
1628
1624 while True:
1629 while True:
1625 x = lr.readline()
1630 x = lr.readline()
1626 if not x:
1631 if not x:
1627 break
1632 break
1628 if state == BFILE and (
1633 if state == BFILE and (
1629 (not context and x[0] == '@')
1634 (not context and x[0] == '@')
1630 or (context is not False and x.startswith('***************'))
1635 or (context is not False and x.startswith('***************'))
1631 or x.startswith('GIT binary patch')):
1636 or x.startswith('GIT binary patch')):
1632 gp = None
1637 gp = None
1633 if (gitpatches and
1638 if (gitpatches and
1634 gitpatches[-1].ispatching(afile, bfile)):
1639 gitpatches[-1].ispatching(afile, bfile)):
1635 gp = gitpatches.pop()
1640 gp = gitpatches.pop()
1636 if x.startswith('GIT binary patch'):
1641 if x.startswith('GIT binary patch'):
1637 h = binhunk(lr, gp.path)
1642 h = binhunk(lr, gp.path)
1638 else:
1643 else:
1639 if context is None and x.startswith('***************'):
1644 if context is None and x.startswith('***************'):
1640 context = True
1645 context = True
1641 h = hunk(x, hunknum + 1, lr, context)
1646 h = hunk(x, hunknum + 1, lr, context)
1642 hunknum += 1
1647 hunknum += 1
1643 if emitfile:
1648 if emitfile:
1644 emitfile = False
1649 emitfile = False
1645 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1650 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1646 yield 'hunk', h
1651 yield 'hunk', h
1647 elif x.startswith('diff --git a/'):
1652 elif x.startswith('diff --git a/'):
1648 m = gitre.match(x.rstrip(' \r\n'))
1653 m = gitre.match(x.rstrip(' \r\n'))
1649 if not m:
1654 if not m:
1650 continue
1655 continue
1651 if gitpatches is None:
1656 if gitpatches is None:
1652 # scan whole input for git metadata
1657 # scan whole input for git metadata
1653 gitpatches = scangitpatch(lr, x)
1658 gitpatches = scangitpatch(lr, x)
1654 yield 'git', [g.copy() for g in gitpatches
1659 yield 'git', [g.copy() for g in gitpatches
1655 if g.op in ('COPY', 'RENAME')]
1660 if g.op in ('COPY', 'RENAME')]
1656 gitpatches.reverse()
1661 gitpatches.reverse()
1657 afile = 'a/' + m.group(1)
1662 afile = 'a/' + m.group(1)
1658 bfile = 'b/' + m.group(2)
1663 bfile = 'b/' + m.group(2)
1659 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1664 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1660 gp = gitpatches.pop()
1665 gp = gitpatches.pop()
1661 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1666 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1662 if not gitpatches:
1667 if not gitpatches:
1663 raise PatchError(_('failed to synchronize metadata for "%s"')
1668 raise PatchError(_('failed to synchronize metadata for "%s"')
1664 % afile[2:])
1669 % afile[2:])
1665 gp = gitpatches[-1]
1670 gp = gitpatches[-1]
1666 newfile = True
1671 newfile = True
1667 elif x.startswith('---'):
1672 elif x.startswith('---'):
1668 # check for a unified diff
1673 # check for a unified diff
1669 l2 = lr.readline()
1674 l2 = lr.readline()
1670 if not l2.startswith('+++'):
1675 if not l2.startswith('+++'):
1671 lr.push(l2)
1676 lr.push(l2)
1672 continue
1677 continue
1673 newfile = True
1678 newfile = True
1674 context = False
1679 context = False
1675 afile = parsefilename(x)
1680 afile = parsefilename(x)
1676 bfile = parsefilename(l2)
1681 bfile = parsefilename(l2)
1677 elif x.startswith('***'):
1682 elif x.startswith('***'):
1678 # check for a context diff
1683 # check for a context diff
1679 l2 = lr.readline()
1684 l2 = lr.readline()
1680 if not l2.startswith('---'):
1685 if not l2.startswith('---'):
1681 lr.push(l2)
1686 lr.push(l2)
1682 continue
1687 continue
1683 l3 = lr.readline()
1688 l3 = lr.readline()
1684 lr.push(l3)
1689 lr.push(l3)
1685 if not l3.startswith("***************"):
1690 if not l3.startswith("***************"):
1686 lr.push(l2)
1691 lr.push(l2)
1687 continue
1692 continue
1688 newfile = True
1693 newfile = True
1689 context = True
1694 context = True
1690 afile = parsefilename(x)
1695 afile = parsefilename(x)
1691 bfile = parsefilename(l2)
1696 bfile = parsefilename(l2)
1692
1697
1693 if newfile:
1698 if newfile:
1694 newfile = False
1699 newfile = False
1695 emitfile = True
1700 emitfile = True
1696 state = BFILE
1701 state = BFILE
1697 hunknum = 0
1702 hunknum = 0
1698
1703
1699 while gitpatches:
1704 while gitpatches:
1700 gp = gitpatches.pop()
1705 gp = gitpatches.pop()
1701 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1706 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1702
1707
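# Hedged sketch, not part of the changeset: one way to consume the event
# stream iterhunks() yields; _applydiff() below does the real work against a
# backend and file store. The helper name is illustrative only.
def _counthunks(fp):
    # map each target file named by a 'file' event to its number of hunks
    counts = {}
    current = None
    for state, values in iterhunks(fp):
        if state == 'file':
            afile, bfile, firsthunk, gp = values
            current = bfile
            counts.setdefault(current, 0)
        elif state == 'hunk':
            counts[current] += 1
        # 'git' events (copy/rename metadata) are ignored in this sketch
    return counts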
1703 def applybindelta(binchunk, data):
1708 def applybindelta(binchunk, data):
1704 """Apply a binary delta hunk
1709 """Apply a binary delta hunk
1705 The algorithm used is the algorithm from git's patch-delta.c
1710 The algorithm used is the algorithm from git's patch-delta.c
1706 """
1711 """
1707 def deltahead(binchunk):
1712 def deltahead(binchunk):
1708 i = 0
1713 i = 0
1709 for c in binchunk:
1714 for c in binchunk:
1710 i += 1
1715 i += 1
1711 if not (ord(c) & 0x80):
1716 if not (ord(c) & 0x80):
1712 return i
1717 return i
1713 return i
1718 return i
1714 out = ""
1719 out = ""
1715 s = deltahead(binchunk)
1720 s = deltahead(binchunk)
1716 binchunk = binchunk[s:]
1721 binchunk = binchunk[s:]
1717 s = deltahead(binchunk)
1722 s = deltahead(binchunk)
1718 binchunk = binchunk[s:]
1723 binchunk = binchunk[s:]
1719 i = 0
1724 i = 0
1720 while i < len(binchunk):
1725 while i < len(binchunk):
1721 cmd = ord(binchunk[i])
1726 cmd = ord(binchunk[i])
1722 i += 1
1727 i += 1
1723 if (cmd & 0x80):
1728 if (cmd & 0x80):
1724 offset = 0
1729 offset = 0
1725 size = 0
1730 size = 0
1726 if (cmd & 0x01):
1731 if (cmd & 0x01):
1727 offset = ord(binchunk[i])
1732 offset = ord(binchunk[i])
1728 i += 1
1733 i += 1
1729 if (cmd & 0x02):
1734 if (cmd & 0x02):
1730 offset |= ord(binchunk[i]) << 8
1735 offset |= ord(binchunk[i]) << 8
1731 i += 1
1736 i += 1
1732 if (cmd & 0x04):
1737 if (cmd & 0x04):
1733 offset |= ord(binchunk[i]) << 16
1738 offset |= ord(binchunk[i]) << 16
1734 i += 1
1739 i += 1
1735 if (cmd & 0x08):
1740 if (cmd & 0x08):
1736 offset |= ord(binchunk[i]) << 24
1741 offset |= ord(binchunk[i]) << 24
1737 i += 1
1742 i += 1
1738 if (cmd & 0x10):
1743 if (cmd & 0x10):
1739 size = ord(binchunk[i])
1744 size = ord(binchunk[i])
1740 i += 1
1745 i += 1
1741 if (cmd & 0x20):
1746 if (cmd & 0x20):
1742 size |= ord(binchunk[i]) << 8
1747 size |= ord(binchunk[i]) << 8
1743 i += 1
1748 i += 1
1744 if (cmd & 0x40):
1749 if (cmd & 0x40):
1745 size |= ord(binchunk[i]) << 16
1750 size |= ord(binchunk[i]) << 16
1746 i += 1
1751 i += 1
1747 if size == 0:
1752 if size == 0:
1748 size = 0x10000
1753 size = 0x10000
1749 offset_end = offset + size
1754 offset_end = offset + size
1750 out += data[offset:offset_end]
1755 out += data[offset:offset_end]
1751 elif cmd != 0:
1756 elif cmd != 0:
1752 offset_end = i + cmd
1757 offset_end = i + cmd
1753 out += binchunk[i:offset_end]
1758 out += binchunk[i:offset_end]
1754 i += cmd
1759 i += cmd
1755 else:
1760 else:
1756 raise PatchError(_('unexpected delta opcode 0'))
1761 raise PatchError(_('unexpected delta opcode 0'))
1757 return out
1762 return out
1758
1763
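# Hedged sketch, not part of the changeset: two hand-encoded deltas in the
# git patch-delta format this function implements. The first two varint
# bytes (source and target lengths) are skipped by deltahead().
assert applybindelta('\x05\x05\x90\x05', 'hello') == 'hello'   # copy: offset 0, size 5
assert applybindelta('\x05\x05\x05world', 'hello') == 'world'  # literal insert of 5 bytes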
1759 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1764 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1760 """Reads a patch from fp and tries to apply it.
1765 """Reads a patch from fp and tries to apply it.
1761
1766
1762 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1767 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1763 there was any fuzz.
1768 there was any fuzz.
1764
1769
1765 If 'eolmode' is 'strict', the patch content and patched file are
1770 If 'eolmode' is 'strict', the patch content and patched file are
1766 read in binary mode. Otherwise, line endings are ignored when
1771 read in binary mode. Otherwise, line endings are ignored when
1767 patching then normalized according to 'eolmode'.
1772 patching then normalized according to 'eolmode'.
1768 """
1773 """
1769 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1774 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1770 prefix=prefix, eolmode=eolmode)
1775 prefix=prefix, eolmode=eolmode)
1771
1776
1772 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1777 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
1773 eolmode='strict'):
1778 eolmode='strict'):
1774
1779
1775 if prefix:
1780 if prefix:
1776 # clean up double slashes, lack of trailing slashes, etc
1781 # clean up double slashes, lack of trailing slashes, etc
1777 prefix = util.normpath(prefix) + '/'
1782 prefix = util.normpath(prefix) + '/'
1778 def pstrip(p):
1783 def pstrip(p):
1779 return pathtransform(p, strip - 1, prefix)[1]
1784 return pathtransform(p, strip - 1, prefix)[1]
1780
1785
1781 rejects = 0
1786 rejects = 0
1782 err = 0
1787 err = 0
1783 current_file = None
1788 current_file = None
1784
1789
1785 for state, values in iterhunks(fp):
1790 for state, values in iterhunks(fp):
1786 if state == 'hunk':
1791 if state == 'hunk':
1787 if not current_file:
1792 if not current_file:
1788 continue
1793 continue
1789 ret = current_file.apply(values)
1794 ret = current_file.apply(values)
1790 if ret > 0:
1795 if ret > 0:
1791 err = 1
1796 err = 1
1792 elif state == 'file':
1797 elif state == 'file':
1793 if current_file:
1798 if current_file:
1794 rejects += current_file.close()
1799 rejects += current_file.close()
1795 current_file = None
1800 current_file = None
1796 afile, bfile, first_hunk, gp = values
1801 afile, bfile, first_hunk, gp = values
1797 if gp:
1802 if gp:
1798 gp.path = pstrip(gp.path)
1803 gp.path = pstrip(gp.path)
1799 if gp.oldpath:
1804 if gp.oldpath:
1800 gp.oldpath = pstrip(gp.oldpath)
1805 gp.oldpath = pstrip(gp.oldpath)
1801 else:
1806 else:
1802 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1807 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1803 prefix)
1808 prefix)
1804 if gp.op == 'RENAME':
1809 if gp.op == 'RENAME':
1805 backend.unlink(gp.oldpath)
1810 backend.unlink(gp.oldpath)
1806 if not first_hunk:
1811 if not first_hunk:
1807 if gp.op == 'DELETE':
1812 if gp.op == 'DELETE':
1808 backend.unlink(gp.path)
1813 backend.unlink(gp.path)
1809 continue
1814 continue
1810 data, mode = None, None
1815 data, mode = None, None
1811 if gp.op in ('RENAME', 'COPY'):
1816 if gp.op in ('RENAME', 'COPY'):
1812 data, mode = store.getfile(gp.oldpath)[:2]
1817 data, mode = store.getfile(gp.oldpath)[:2]
1813 # FIXME: failing getfile has never been handled here
1818 # FIXME: failing getfile has never been handled here
1814 assert data is not None
1819 assert data is not None
1815 if gp.mode:
1820 if gp.mode:
1816 mode = gp.mode
1821 mode = gp.mode
1817 if gp.op == 'ADD':
1822 if gp.op == 'ADD':
1818 # Added files without content have no hunk and
1823 # Added files without content have no hunk and
1819 # must be created
1824 # must be created
1820 data = ''
1825 data = ''
1821 if data or mode:
1826 if data or mode:
1822 if (gp.op in ('ADD', 'RENAME', 'COPY')
1827 if (gp.op in ('ADD', 'RENAME', 'COPY')
1823 and backend.exists(gp.path)):
1828 and backend.exists(gp.path)):
1824 raise PatchError(_("cannot create %s: destination "
1829 raise PatchError(_("cannot create %s: destination "
1825 "already exists") % gp.path)
1830 "already exists") % gp.path)
1826 backend.setfile(gp.path, data, mode, gp.oldpath)
1831 backend.setfile(gp.path, data, mode, gp.oldpath)
1827 continue
1832 continue
1828 try:
1833 try:
1829 current_file = patcher(ui, gp, backend, store,
1834 current_file = patcher(ui, gp, backend, store,
1830 eolmode=eolmode)
1835 eolmode=eolmode)
1831 except PatchError, inst:
1836 except PatchError, inst:
1832 ui.warn(str(inst) + '\n')
1837 ui.warn(str(inst) + '\n')
1833 current_file = None
1838 current_file = None
1834 rejects += 1
1839 rejects += 1
1835 continue
1840 continue
1836 elif state == 'git':
1841 elif state == 'git':
1837 for gp in values:
1842 for gp in values:
1838 path = pstrip(gp.oldpath)
1843 path = pstrip(gp.oldpath)
1839 data, mode = backend.getfile(path)
1844 data, mode = backend.getfile(path)
1840 if data is None:
1845 if data is None:
1841 # The error ignored here will trigger a getfile()
1846 # The error ignored here will trigger a getfile()
1842 # error in a place more appropriate for error
1847 # error in a place more appropriate for error
1843 # handling, and will not interrupt the patching
1848 # handling, and will not interrupt the patching
1844 # process.
1849 # process.
1845 pass
1850 pass
1846 else:
1851 else:
1847 store.setfile(path, data, mode)
1852 store.setfile(path, data, mode)
1848 else:
1853 else:
1849 raise util.Abort(_('unsupported parser state: %s') % state)
1854 raise util.Abort(_('unsupported parser state: %s') % state)
1850
1855
1851 if current_file:
1856 if current_file:
1852 rejects += current_file.close()
1857 rejects += current_file.close()
1853
1858
1854 if rejects:
1859 if rejects:
1855 return -1
1860 return -1
1856 return err
1861 return err
1857
1862
1858 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1863 def _externalpatch(ui, repo, patcher, patchname, strip, files,
1859 similarity):
1864 similarity):
1860 """use <patcher> to apply <patchname> to the working directory.
1865 """use <patcher> to apply <patchname> to the working directory.
1861 returns whether patch was applied with fuzz factor."""
1866 returns whether patch was applied with fuzz factor."""
1862
1867
1863 fuzz = False
1868 fuzz = False
1864 args = []
1869 args = []
1865 cwd = repo.root
1870 cwd = repo.root
1866 if cwd:
1871 if cwd:
1867 args.append('-d %s' % util.shellquote(cwd))
1872 args.append('-d %s' % util.shellquote(cwd))
1868 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1873 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
1869 util.shellquote(patchname)))
1874 util.shellquote(patchname)))
1870 try:
1875 try:
1871 for line in fp:
1876 for line in fp:
1872 line = line.rstrip()
1877 line = line.rstrip()
1873 ui.note(line + '\n')
1878 ui.note(line + '\n')
1874 if line.startswith('patching file '):
1879 if line.startswith('patching file '):
1875 pf = util.parsepatchoutput(line)
1880 pf = util.parsepatchoutput(line)
1876 printed_file = False
1881 printed_file = False
1877 files.add(pf)
1882 files.add(pf)
1878 elif line.find('with fuzz') >= 0:
1883 elif line.find('with fuzz') >= 0:
1879 fuzz = True
1884 fuzz = True
1880 if not printed_file:
1885 if not printed_file:
1881 ui.warn(pf + '\n')
1886 ui.warn(pf + '\n')
1882 printed_file = True
1887 printed_file = True
1883 ui.warn(line + '\n')
1888 ui.warn(line + '\n')
1884 elif line.find('saving rejects to file') >= 0:
1889 elif line.find('saving rejects to file') >= 0:
1885 ui.warn(line + '\n')
1890 ui.warn(line + '\n')
1886 elif line.find('FAILED') >= 0:
1891 elif line.find('FAILED') >= 0:
1887 if not printed_file:
1892 if not printed_file:
1888 ui.warn(pf + '\n')
1893 ui.warn(pf + '\n')
1889 printed_file = True
1894 printed_file = True
1890 ui.warn(line + '\n')
1895 ui.warn(line + '\n')
1891 finally:
1896 finally:
1892 if files:
1897 if files:
1893 scmutil.marktouched(repo, files, similarity)
1898 scmutil.marktouched(repo, files, similarity)
1894 code = fp.close()
1899 code = fp.close()
1895 if code:
1900 if code:
1896 raise PatchError(_("patch command failed: %s") %
1901 raise PatchError(_("patch command failed: %s") %
1897 util.explainexit(code)[0])
1902 util.explainexit(code)[0])
1898 return fuzz
1903 return fuzz
1899
1904
1900 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1905 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
1901 eolmode='strict'):
1906 eolmode='strict'):
1902 if files is None:
1907 if files is None:
1903 files = set()
1908 files = set()
1904 if eolmode is None:
1909 if eolmode is None:
1905 eolmode = ui.config('patch', 'eol', 'strict')
1910 eolmode = ui.config('patch', 'eol', 'strict')
1906 if eolmode.lower() not in eolmodes:
1911 if eolmode.lower() not in eolmodes:
1907 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1912 raise util.Abort(_('unsupported line endings type: %s') % eolmode)
1908 eolmode = eolmode.lower()
1913 eolmode = eolmode.lower()
1909
1914
1910 store = filestore()
1915 store = filestore()
1911 try:
1916 try:
1912 fp = open(patchobj, 'rb')
1917 fp = open(patchobj, 'rb')
1913 except TypeError:
1918 except TypeError:
1914 fp = patchobj
1919 fp = patchobj
1915 try:
1920 try:
1916 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1921 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
1917 eolmode=eolmode)
1922 eolmode=eolmode)
1918 finally:
1923 finally:
1919 if fp != patchobj:
1924 if fp != patchobj:
1920 fp.close()
1925 fp.close()
1921 files.update(backend.close())
1926 files.update(backend.close())
1922 store.close()
1927 store.close()
1923 if ret < 0:
1928 if ret < 0:
1924 raise PatchError(_('patch failed to apply'))
1929 raise PatchError(_('patch failed to apply'))
1925 return ret > 0
1930 return ret > 0
1926
1931
1927 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1932 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
1928 eolmode='strict', similarity=0):
1933 eolmode='strict', similarity=0):
1929 """use builtin patch to apply <patchobj> to the working directory.
1934 """use builtin patch to apply <patchobj> to the working directory.
1930 returns whether patch was applied with fuzz factor."""
1935 returns whether patch was applied with fuzz factor."""
1931 backend = workingbackend(ui, repo, similarity)
1936 backend = workingbackend(ui, repo, similarity)
1932 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1937 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1933
1938
1934 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1939 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
1935 eolmode='strict'):
1940 eolmode='strict'):
1936 backend = repobackend(ui, repo, ctx, store)
1941 backend = repobackend(ui, repo, ctx, store)
1937 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1942 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
1938
1943
1939 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1944 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
1940 similarity=0):
1945 similarity=0):
1941 """Apply <patchname> to the working directory.
1946 """Apply <patchname> to the working directory.
1942
1947
1943 'eolmode' specifies how end of lines should be handled. It can be:
1948 'eolmode' specifies how end of lines should be handled. It can be:
1944 - 'strict': inputs are read in binary mode, EOLs are preserved
1949 - 'strict': inputs are read in binary mode, EOLs are preserved
1945 - 'crlf': EOLs are ignored when patching and reset to CRLF
1950 - 'crlf': EOLs are ignored when patching and reset to CRLF
1946 - 'lf': EOLs are ignored when patching and reset to LF
1951 - 'lf': EOLs are ignored when patching and reset to LF
1947 - None: get it from user settings, default to 'strict'
1952 - None: get it from user settings, default to 'strict'
1948 'eolmode' is ignored when using an external patcher program.
1953 'eolmode' is ignored when using an external patcher program.
1949
1954
1950 Returns whether patch was applied with fuzz factor.
1955 Returns whether patch was applied with fuzz factor.
1951 """
1956 """
1952 patcher = ui.config('ui', 'patch')
1957 patcher = ui.config('ui', 'patch')
1953 if files is None:
1958 if files is None:
1954 files = set()
1959 files = set()
1955 if patcher:
1960 if patcher:
1956 return _externalpatch(ui, repo, patcher, patchname, strip,
1961 return _externalpatch(ui, repo, patcher, patchname, strip,
1957 files, similarity)
1962 files, similarity)
1958 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1963 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
1959 similarity)
1964 similarity)
1960
1965
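# Hedged usage sketch, not part of the changeset: applying a patch file to a
# working copy from outside this module. The import spellings and the
# 'fix.patch' filename are assumptions of this example, not part of the
# change being reviewed.
# from mercurial import ui as uimod, hg
# from mercurial import patch as patchmod
# u = uimod.ui()
# repo = hg.repository(u, '.')
# fuzz = patchmod.patch(u, repo, 'fix.patch', strip=1)
# # fuzz is True when the internal patcher had to apply hunks with fuzz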
1961 def changedfiles(ui, repo, patchpath, strip=1):
1966 def changedfiles(ui, repo, patchpath, strip=1):
1962 backend = fsbackend(ui, repo.root)
1967 backend = fsbackend(ui, repo.root)
1963 fp = open(patchpath, 'rb')
1968 fp = open(patchpath, 'rb')
1964 try:
1969 try:
1965 changed = set()
1970 changed = set()
1966 for state, values in iterhunks(fp):
1971 for state, values in iterhunks(fp):
1967 if state == 'file':
1972 if state == 'file':
1968 afile, bfile, first_hunk, gp = values
1973 afile, bfile, first_hunk, gp = values
1969 if gp:
1974 if gp:
1970 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1975 gp.path = pathtransform(gp.path, strip - 1, '')[1]
1971 if gp.oldpath:
1976 if gp.oldpath:
1972 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1977 gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
1973 else:
1978 else:
1974 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1979 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
1975 '')
1980 '')
1976 changed.add(gp.path)
1981 changed.add(gp.path)
1977 if gp.op == 'RENAME':
1982 if gp.op == 'RENAME':
1978 changed.add(gp.oldpath)
1983 changed.add(gp.oldpath)
1979 elif state not in ('hunk', 'git'):
1984 elif state not in ('hunk', 'git'):
1980 raise util.Abort(_('unsupported parser state: %s') % state)
1985 raise util.Abort(_('unsupported parser state: %s') % state)
1981 return changed
1986 return changed
1982 finally:
1987 finally:
1983 fp.close()
1988 fp.close()
1984
1989
1985 class GitDiffRequired(Exception):
1990 class GitDiffRequired(Exception):
1986 pass
1991 pass
1987
1992
1988 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1993 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
1989 '''return diffopts with all features supported and parsed'''
1994 '''return diffopts with all features supported and parsed'''
1990 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1995 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
1991 git=True, whitespace=True, formatchanging=True)
1996 git=True, whitespace=True, formatchanging=True)
1992
1997
1993 diffopts = diffallopts
1998 diffopts = diffallopts
1994
1999
1995 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2000 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
1996 whitespace=False, formatchanging=False):
2001 whitespace=False, formatchanging=False):
1997 '''return diffopts with only opted-in features parsed
2002 '''return diffopts with only opted-in features parsed
1998
2003
1999 Features:
2004 Features:
2000 - git: git-style diffs
2005 - git: git-style diffs
2001 - whitespace: whitespace options like ignoreblanklines and ignorews
2006 - whitespace: whitespace options like ignoreblanklines and ignorews
2002 - formatchanging: options that will likely break or cause correctness issues
2007 - formatchanging: options that will likely break or cause correctness issues
2003 with most diff parsers
2008 with most diff parsers
2004 '''
2009 '''
2005 def get(key, name=None, getter=ui.configbool, forceplain=None):
2010 def get(key, name=None, getter=ui.configbool, forceplain=None):
2006 if opts:
2011 if opts:
2007 v = opts.get(key)
2012 v = opts.get(key)
2008 if v:
2013 if v:
2009 return v
2014 return v
2010 if forceplain is not None and ui.plain():
2015 if forceplain is not None and ui.plain():
2011 return forceplain
2016 return forceplain
2012 return getter(section, name or key, None, untrusted=untrusted)
2017 return getter(section, name or key, None, untrusted=untrusted)
2013
2018
2014 # core options, expected to be understood by every diff parser
2019 # core options, expected to be understood by every diff parser
2015 buildopts = {
2020 buildopts = {
2016 'nodates': get('nodates'),
2021 'nodates': get('nodates'),
2017 'showfunc': get('show_function', 'showfunc'),
2022 'showfunc': get('show_function', 'showfunc'),
2018 'context': get('unified', getter=ui.config),
2023 'context': get('unified', getter=ui.config),
2019 }
2024 }
2020
2025
2021 if git:
2026 if git:
2022 buildopts['git'] = get('git')
2027 buildopts['git'] = get('git')
2023 if whitespace:
2028 if whitespace:
2024 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2029 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2025 buildopts['ignorewsamount'] = get('ignore_space_change',
2030 buildopts['ignorewsamount'] = get('ignore_space_change',
2026 'ignorewsamount')
2031 'ignorewsamount')
2027 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2032 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2028 'ignoreblanklines')
2033 'ignoreblanklines')
2029 if formatchanging:
2034 if formatchanging:
2030 buildopts['text'] = opts and opts.get('text')
2035 buildopts['text'] = opts and opts.get('text')
2031 buildopts['nobinary'] = get('nobinary')
2036 buildopts['nobinary'] = get('nobinary')
2032 buildopts['noprefix'] = get('noprefix', forceplain=False)
2037 buildopts['noprefix'] = get('noprefix', forceplain=False)
2033
2038
2034 return mdiff.diffopts(**buildopts)
2039 return mdiff.diffopts(**buildopts)
2035
2040
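# Hedged sketch, not part of the changeset: building diff options. 'ui' here
# stands for any mercurial ui instance (an assumption of this example).
# Explicit opts win over configuration, and difffeatureopts() only parses the
# feature groups a caller opts into.
# allopts = diffallopts(ui, {'git': True})         # every supported feature
# someopts = difffeatureopts(ui, whitespace=True)  # core + whitespace only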
2036 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2041 def diff(repo, node1=None, node2=None, match=None, changes=None, opts=None,
2037 losedatafn=None, prefix=''):
2042 losedatafn=None, prefix=''):
2038 '''yields diff of changes to files between two nodes, or node and
2043 '''yields diff of changes to files between two nodes, or node and
2039 working directory.
2044 working directory.
2040
2045
2041 if node1 is None, use first dirstate parent instead.
2046 if node1 is None, use first dirstate parent instead.
2042 if node2 is None, compare node1 with working directory.
2047 if node2 is None, compare node1 with working directory.
2043
2048
2044 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2049 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2045 every time some change cannot be represented with the current
2050 every time some change cannot be represented with the current
2046 patch format. Return False to upgrade to git patch format, True to
2051 patch format. Return False to upgrade to git patch format, True to
2047 accept the loss or raise an exception to abort the diff. It is
2052 accept the loss or raise an exception to abort the diff. It is
2048 called with the name of current file being diffed as 'fn'. If set
2053 called with the name of current file being diffed as 'fn'. If set
2049 to None, patches will always be upgraded to git format when
2054 to None, patches will always be upgraded to git format when
2050 necessary.
2055 necessary.
2051
2056
2052 prefix is a filename prefix that is prepended to all filenames on
2057 prefix is a filename prefix that is prepended to all filenames on
2053 display (used for subrepos).
2058 display (used for subrepos).
2054 '''
2059 '''
2055
2060
2056 if opts is None:
2061 if opts is None:
2057 opts = mdiff.defaultopts
2062 opts = mdiff.defaultopts
2058
2063
2059 if not node1 and not node2:
2064 if not node1 and not node2:
2060 node1 = repo.dirstate.p1()
2065 node1 = repo.dirstate.p1()
2061
2066
2062 def lrugetfilectx():
2067 def lrugetfilectx():
2063 cache = {}
2068 cache = {}
2064 order = util.deque()
2069 order = util.deque()
2065 def getfilectx(f, ctx):
2070 def getfilectx(f, ctx):
2066 fctx = ctx.filectx(f, filelog=cache.get(f))
2071 fctx = ctx.filectx(f, filelog=cache.get(f))
2067 if f not in cache:
2072 if f not in cache:
2068 if len(cache) > 20:
2073 if len(cache) > 20:
2069 del cache[order.popleft()]
2074 del cache[order.popleft()]
2070 cache[f] = fctx.filelog()
2075 cache[f] = fctx.filelog()
2071 else:
2076 else:
2072 order.remove(f)
2077 order.remove(f)
2073 order.append(f)
2078 order.append(f)
2074 return fctx
2079 return fctx
2075 return getfilectx
2080 return getfilectx
2076 getfilectx = lrugetfilectx()
2081 getfilectx = lrugetfilectx()
2077
2082
2078 ctx1 = repo[node1]
2083 ctx1 = repo[node1]
2079 ctx2 = repo[node2]
2084 ctx2 = repo[node2]
2080
2085
2081 if not changes:
2086 if not changes:
2082 changes = repo.status(ctx1, ctx2, match=match)
2087 changes = repo.status(ctx1, ctx2, match=match)
2083 modified, added, removed = changes[:3]
2088 modified, added, removed = changes[:3]
2084
2089
2085 if not modified and not added and not removed:
2090 if not modified and not added and not removed:
2086 return []
2091 return []
2087
2092
2088 if repo.ui.debugflag:
2093 if repo.ui.debugflag:
2089 hexfunc = hex
2094 hexfunc = hex
2090 else:
2095 else:
2091 hexfunc = short
2096 hexfunc = short
2092 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2097 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2093
2098
2094 copy = {}
2099 copy = {}
2095 if opts.git or opts.upgrade:
2100 if opts.git or opts.upgrade:
2096 copy = copies.pathcopies(ctx1, ctx2)
2101 copy = copies.pathcopies(ctx1, ctx2)
2097
2102
2098 def difffn(opts, losedata):
2103 def difffn(opts, losedata):
2099 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2104 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2100 copy, getfilectx, opts, losedata, prefix)
2105 copy, getfilectx, opts, losedata, prefix)
2101 if opts.upgrade and not opts.git:
2106 if opts.upgrade and not opts.git:
2102 try:
2107 try:
2103 def losedata(fn):
2108 def losedata(fn):
2104 if not losedatafn or not losedatafn(fn=fn):
2109 if not losedatafn or not losedatafn(fn=fn):
2105 raise GitDiffRequired
2110 raise GitDiffRequired
2106 # Buffer the whole output until we are sure it can be generated
2111 # Buffer the whole output until we are sure it can be generated
2107 return list(difffn(opts.copy(git=False), losedata))
2112 return list(difffn(opts.copy(git=False), losedata))
2108 except GitDiffRequired:
2113 except GitDiffRequired:
2109 return difffn(opts.copy(git=True), None)
2114 return difffn(opts.copy(git=True), None)
2110 else:
2115 else:
2111 return difffn(opts, None)
2116 return difffn(opts, None)
2112
2117
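# Hedged sketch, not part of the changeset: streaming a diff of the working
# directory against its first parent. 'repo' stands for any localrepository
# and 'write' for any output sink (both assumptions of this example).
# opts = diffallopts(repo.ui, {'git': True})
# for chunk in diff(repo, opts=opts):
#     write(chunk)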
2113 def difflabel(func, *args, **kw):
2118 def difflabel(func, *args, **kw):
2114 '''yields 2-tuples of (output, label) based on the output of func()'''
2119 '''yields 2-tuples of (output, label) based on the output of func()'''
2115 headprefixes = [('diff', 'diff.diffline'),
2120 headprefixes = [('diff', 'diff.diffline'),
2116 ('copy', 'diff.extended'),
2121 ('copy', 'diff.extended'),
2117 ('rename', 'diff.extended'),
2122 ('rename', 'diff.extended'),
2118 ('old', 'diff.extended'),
2123 ('old', 'diff.extended'),
2119 ('new', 'diff.extended'),
2124 ('new', 'diff.extended'),
2120 ('deleted', 'diff.extended'),
2125 ('deleted', 'diff.extended'),
2121 ('---', 'diff.file_a'),
2126 ('---', 'diff.file_a'),
2122 ('+++', 'diff.file_b')]
2127 ('+++', 'diff.file_b')]
2123 textprefixes = [('@', 'diff.hunk'),
2128 textprefixes = [('@', 'diff.hunk'),
2124 ('-', 'diff.deleted'),
2129 ('-', 'diff.deleted'),
2125 ('+', 'diff.inserted')]
2130 ('+', 'diff.inserted')]
2126 head = False
2131 head = False
2127 for chunk in func(*args, **kw):
2132 for chunk in func(*args, **kw):
2128 lines = chunk.split('\n')
2133 lines = chunk.split('\n')
2129 for i, line in enumerate(lines):
2134 for i, line in enumerate(lines):
2130 if i != 0:
2135 if i != 0:
2131 yield ('\n', '')
2136 yield ('\n', '')
2132 if head:
2137 if head:
2133 if line.startswith('@'):
2138 if line.startswith('@'):
2134 head = False
2139 head = False
2135 else:
2140 else:
2136 if line and line[0] not in ' +-@\\':
2141 if line and line[0] not in ' +-@\\':
2137 head = True
2142 head = True
2138 stripline = line
2143 stripline = line
2139 diffline = False
2144 diffline = False
2140 if not head and line and line[0] in '+-':
2145 if not head and line and line[0] in '+-':
2141 # highlight tabs and trailing whitespace, but only in
2146 # highlight tabs and trailing whitespace, but only in
2142 # changed lines
2147 # changed lines
2143 stripline = line.rstrip()
2148 stripline = line.rstrip()
2144 diffline = True
2149 diffline = True
2145
2150
2146 prefixes = textprefixes
2151 prefixes = textprefixes
2147 if head:
2152 if head:
2148 prefixes = headprefixes
2153 prefixes = headprefixes
2149 for prefix, label in prefixes:
2154 for prefix, label in prefixes:
2150 if stripline.startswith(prefix):
2155 if stripline.startswith(prefix):
2151 if diffline:
2156 if diffline:
2152 for token in tabsplitter.findall(stripline):
2157 for token in tabsplitter.findall(stripline):
2153 if '\t' == token[0]:
2158 if '\t' == token[0]:
2154 yield (token, 'diff.tab')
2159 yield (token, 'diff.tab')
2155 else:
2160 else:
2156 yield (token, label)
2161 yield (token, label)
2157 else:
2162 else:
2158 yield (stripline, label)
2163 yield (stripline, label)
2159 break
2164 break
2160 else:
2165 else:
2161 yield (line, '')
2166 yield (line, '')
2162 if line != stripline:
2167 if line != stripline:
2163 yield (line[len(stripline):], 'diff.trailingwhitespace')
2168 yield (line[len(stripline):], 'diff.trailingwhitespace')
2164
2169
2165 def diffui(*args, **kw):
2170 def diffui(*args, **kw):
2166 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2171 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2167 return difflabel(diff, *args, **kw)
2172 return difflabel(diff, *args, **kw)
2168
2173
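# Hedged sketch, not part of the changeset: diffui() pairs each output chunk
# with a label such as 'diff.inserted' so callers can colorize it, roughly
# what a colored 'hg diff' does ('repo' is assumed as above).
# for chunk, label in diffui(repo, opts=diffallopts(repo.ui)):
#     repo.ui.write(chunk, label=label)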
2169 def _filepairs(ctx1, modified, added, removed, copy, opts):
2174 def _filepairs(ctx1, modified, added, removed, copy, opts):
2170 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2175 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2171 before and f2 is the name after. For added files, f1 will be None,
2176 before and f2 is the name after. For added files, f1 will be None,
2172 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2177 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2173 or 'rename' (the latter two only if opts.git is set).'''
2178 or 'rename' (the latter two only if opts.git is set).'''
2174 gone = set()
2179 gone = set()
2175
2180
2176 copyto = dict([(v, k) for k, v in copy.items()])
2181 copyto = dict([(v, k) for k, v in copy.items()])
2177
2182
2178 addedset, removedset = set(added), set(removed)
2183 addedset, removedset = set(added), set(removed)
2179 # Fix up added, since merged-in additions appear as
2184 # Fix up added, since merged-in additions appear as
2180 # modifications during merges
2185 # modifications during merges
2181 for f in modified:
2186 for f in modified:
2182 if f not in ctx1:
2187 if f not in ctx1:
2183 addedset.add(f)
2188 addedset.add(f)
2184
2189
2185 for f in sorted(modified + added + removed):
2190 for f in sorted(modified + added + removed):
2186 copyop = None
2191 copyop = None
2187 f1, f2 = f, f
2192 f1, f2 = f, f
2188 if f in addedset:
2193 if f in addedset:
2189 f1 = None
2194 f1 = None
2190 if f in copy:
2195 if f in copy:
2191 if opts.git:
2196 if opts.git:
2192 f1 = copy[f]
2197 f1 = copy[f]
2193 if f1 in removedset and f1 not in gone:
2198 if f1 in removedset and f1 not in gone:
2194 copyop = 'rename'
2199 copyop = 'rename'
2195 gone.add(f1)
2200 gone.add(f1)
2196 else:
2201 else:
2197 copyop = 'copy'
2202 copyop = 'copy'
2198 elif f in removedset:
2203 elif f in removedset:
2199 f2 = None
2204 f2 = None
2200 if opts.git:
2205 if opts.git:
2201 # have we already reported a copy above?
2206 # have we already reported a copy above?
2202 if (f in copyto and copyto[f] in addedset
2207 if (f in copyto and copyto[f] in addedset
2203 and copy[copyto[f]] == f):
2208 and copy[copyto[f]] == f):
2204 continue
2209 continue
2205 yield f1, f2, copyop
2210 yield f1, f2, copyop
2206
2211
2207 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2212 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2208 copy, getfilectx, opts, losedatafn, prefix):
2213 copy, getfilectx, opts, losedatafn, prefix):
2209
2214
2210 def gitindex(text):
2215 def gitindex(text):
2211 if not text:
2216 if not text:
2212 text = ""
2217 text = ""
2213 l = len(text)
2218 l = len(text)
2214 s = util.sha1('blob %d\0' % l)
2219 s = util.sha1('blob %d\0' % l)
2215 s.update(text)
2220 s.update(text)
2216 return s.hexdigest()
2221 return s.hexdigest()
2217
2222
2218 if opts.noprefix:
2223 if opts.noprefix:
2219 aprefix = bprefix = ''
2224 aprefix = bprefix = ''
2220 else:
2225 else:
2221 aprefix = 'a/'
2226 aprefix = 'a/'
2222 bprefix = 'b/'
2227 bprefix = 'b/'
2223
2228
2224 def diffline(f, revs):
2229 def diffline(f, revs):
2225 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2230 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2226 return 'diff %s %s' % (revinfo, f)
2231 return 'diff %s %s' % (revinfo, f)
2227
2232
2228 date1 = util.datestr(ctx1.date())
2233 date1 = util.datestr(ctx1.date())
2229 date2 = util.datestr(ctx2.date())
2234 date2 = util.datestr(ctx2.date())
2230
2235
2231 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2236 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2232
2237
2233 for f1, f2, copyop in _filepairs(
2238 for f1, f2, copyop in _filepairs(
2234 ctx1, modified, added, removed, copy, opts):
2239 ctx1, modified, added, removed, copy, opts):
2235 content1 = None
2240 content1 = None
2236 content2 = None
2241 content2 = None
2237 flag1 = None
2242 flag1 = None
2238 flag2 = None
2243 flag2 = None
2239 if f1:
2244 if f1:
2240 content1 = getfilectx(f1, ctx1).data()
2245 content1 = getfilectx(f1, ctx1).data()
2241 if opts.git or losedatafn:
2246 if opts.git or losedatafn:
2242 flag1 = ctx1.flags(f1)
2247 flag1 = ctx1.flags(f1)
2243 if f2:
2248 if f2:
2244 content2 = getfilectx(f2, ctx2).data()
2249 content2 = getfilectx(f2, ctx2).data()
2245 if opts.git or losedatafn:
2250 if opts.git or losedatafn:
2246 flag2 = ctx2.flags(f2)
2251 flag2 = ctx2.flags(f2)
2247 binary = False
2252 binary = False
2248 if opts.git or losedatafn:
2253 if opts.git or losedatafn:
2249 binary = util.binary(content1) or util.binary(content2)
2254 binary = util.binary(content1) or util.binary(content2)
2250
2255
2251 if losedatafn and not opts.git:
2256 if losedatafn and not opts.git:
2252 if (binary or
2257 if (binary or
2253 # copy/rename
2258 # copy/rename
2254 f2 in copy or
2259 f2 in copy or
2255 # empty file creation
2260 # empty file creation
2256 (not f1 and not content2) or
2261 (not f1 and not content2) or
2257 # empty file deletion
2262 # empty file deletion
2258 (not content1 and not f2) or
2263 (not content1 and not f2) or
2259 # create with flags
2264 # create with flags
2260 (not f1 and flag2) or
2265 (not f1 and flag2) or
2261 # change flags
2266 # change flags
2262 (f1 and f2 and flag1 != flag2)):
2267 (f1 and f2 and flag1 != flag2)):
2263 losedatafn(f2 or f1)
2268 losedatafn(f2 or f1)
2264
2269
2265 path1 = posixpath.join(prefix, f1 or f2)
2270 path1 = posixpath.join(prefix, f1 or f2)
2266 path2 = posixpath.join(prefix, f2 or f1)
2271 path2 = posixpath.join(prefix, f2 or f1)
2267 header = []
2272 header = []
2268 if opts.git:
2273 if opts.git:
2269 header.append('diff --git %s%s %s%s' %
2274 header.append('diff --git %s%s %s%s' %
2270 (aprefix, path1, bprefix, path2))
2275 (aprefix, path1, bprefix, path2))
2271 if not f1: # added
2276 if not f1: # added
2272 header.append('new file mode %s' % gitmode[flag2])
2277 header.append('new file mode %s' % gitmode[flag2])
2273 elif not f2: # removed
2278 elif not f2: # removed
2274 header.append('deleted file mode %s' % gitmode[flag1])
2279 header.append('deleted file mode %s' % gitmode[flag1])
2275 else: # modified/copied/renamed
2280 else: # modified/copied/renamed
2276 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2281 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2277 if mode1 != mode2:
2282 if mode1 != mode2:
2278 header.append('old mode %s' % mode1)
2283 header.append('old mode %s' % mode1)
2279 header.append('new mode %s' % mode2)
2284 header.append('new mode %s' % mode2)
2280 if copyop is not None:
2285 if copyop is not None:
2281 header.append('%s from %s' % (copyop, path1))
2286 header.append('%s from %s' % (copyop, path1))
2282 header.append('%s to %s' % (copyop, path2))
2287 header.append('%s to %s' % (copyop, path2))
2283 elif revs and not repo.ui.quiet:
2288 elif revs and not repo.ui.quiet:
2284 header.append(diffline(path1, revs))
2289 header.append(diffline(path1, revs))
2285
2290
2286 if binary and opts.git and not opts.nobinary:
2291 if binary and opts.git and not opts.nobinary:
2287 text = mdiff.b85diff(content1, content2)
2292 text = mdiff.b85diff(content1, content2)
2288 if text:
2293 if text:
2289 header.append('index %s..%s' %
2294 header.append('index %s..%s' %
2290 (gitindex(content1), gitindex(content2)))
2295 (gitindex(content1), gitindex(content2)))
2291 else:
2296 else:
2292 text = mdiff.unidiff(content1, date1,
2297 text = mdiff.unidiff(content1, date1,
2293 content2, date2,
2298 content2, date2,
2294 path1, path2, opts=opts)
2299 path1, path2, opts=opts)
2295 if header and (text or len(header) > 1):
2300 if header and (text or len(header) > 1):
2296 yield '\n'.join(header) + '\n'
2301 yield '\n'.join(header) + '\n'
2297 if text:
2302 if text:
2298 yield text
2303 yield text
2299
2304
2300 def diffstatsum(stats):
2305 def diffstatsum(stats):
2301 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2306 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2302 for f, a, r, b in stats:
2307 for f, a, r, b in stats:
2303 maxfile = max(maxfile, encoding.colwidth(f))
2308 maxfile = max(maxfile, encoding.colwidth(f))
2304 maxtotal = max(maxtotal, a + r)
2309 maxtotal = max(maxtotal, a + r)
2305 addtotal += a
2310 addtotal += a
2306 removetotal += r
2311 removetotal += r
2307 binary = binary or b
2312 binary = binary or b
2308
2313
2309 return maxfile, maxtotal, addtotal, removetotal, binary
2314 return maxfile, maxtotal, addtotal, removetotal, binary
2310
2315
2311 def diffstatdata(lines):
2316 def diffstatdata(lines):
2312 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2317 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2313
2318
2314 results = []
2319 results = []
2315 filename, adds, removes, isbinary = None, 0, 0, False
2320 filename, adds, removes, isbinary = None, 0, 0, False
2316
2321
2317 def addresult():
2322 def addresult():
2318 if filename:
2323 if filename:
2319 results.append((filename, adds, removes, isbinary))
2324 results.append((filename, adds, removes, isbinary))
2320
2325
2321 for line in lines:
2326 for line in lines:
2322 if line.startswith('diff'):
2327 if line.startswith('diff'):
2323 addresult()
2328 addresult()
2324 # set numbers to 0 anyway when starting new file
2329 # set numbers to 0 anyway when starting new file
2325 adds, removes, isbinary = 0, 0, False
2330 adds, removes, isbinary = 0, 0, False
2326 if line.startswith('diff --git a/'):
2331 if line.startswith('diff --git a/'):
2327 filename = gitre.search(line).group(2)
2332 filename = gitre.search(line).group(2)
2328 elif line.startswith('diff -r'):
2333 elif line.startswith('diff -r'):
2329 # format: "diff -r ... -r ... filename"
2334 # format: "diff -r ... -r ... filename"
2330 filename = diffre.search(line).group(1)
2335 filename = diffre.search(line).group(1)
2331 elif line.startswith('+') and not line.startswith('+++ '):
2336 elif line.startswith('+') and not line.startswith('+++ '):
2332 adds += 1
2337 adds += 1
2333 elif line.startswith('-') and not line.startswith('--- '):
2338 elif line.startswith('-') and not line.startswith('--- '):
2334 removes += 1
2339 removes += 1
2335 elif (line.startswith('GIT binary patch') or
2340 elif (line.startswith('GIT binary patch') or
2336 line.startswith('Binary file')):
2341 line.startswith('Binary file')):
2337 isbinary = True
2342 isbinary = True
2338 addresult()
2343 addresult()
2339 return results
2344 return results
2340
2345
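# Hedged sketch, not part of the changeset: diffstatdata() reduces raw diff
# lines to (filename, adds, removes, isbinary) tuples, which diffstatsum()
# above and diffstat() below then aggregate and render.
example_lines = ['diff -r 000000000000 -r 111111111111 foo',
                 '--- a/foo',
                 '+++ b/foo',
                 '@@ -1,1 +1,2 @@',
                 '-old',
                 '+new',
                 '+newer']
assert diffstatdata(example_lines) == [('foo', 2, 1, False)]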
2341 def diffstat(lines, width=80, git=False):
2346 def diffstat(lines, width=80, git=False):
2342 output = []
2347 output = []
2343 stats = diffstatdata(lines)
2348 stats = diffstatdata(lines)
2344 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2349 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2345
2350
2346 countwidth = len(str(maxtotal))
2351 countwidth = len(str(maxtotal))
2347 if hasbinary and countwidth < 3:
2352 if hasbinary and countwidth < 3:
2348 countwidth = 3
2353 countwidth = 3
2349 graphwidth = width - countwidth - maxname - 6
2354 graphwidth = width - countwidth - maxname - 6
2350 if graphwidth < 10:
2355 if graphwidth < 10:
2351 graphwidth = 10
2356 graphwidth = 10
2352
2357
2353 def scale(i):
2358 def scale(i):
2354 if maxtotal <= graphwidth:
2359 if maxtotal <= graphwidth:
2355 return i
2360 return i
2356 # If diffstat runs out of room it doesn't print anything,
2361 # If diffstat runs out of room it doesn't print anything,
2357 # which isn't very useful, so always print at least one + or -
2362 # which isn't very useful, so always print at least one + or -
2358 # if there were at least some changes.
2363 # if there were at least some changes.
2359 return max(i * graphwidth // maxtotal, int(bool(i)))
2364 return max(i * graphwidth // maxtotal, int(bool(i)))
2360
2365
2361 for filename, adds, removes, isbinary in stats:
2366 for filename, adds, removes, isbinary in stats:
2362 if isbinary:
2367 if isbinary:
2363 count = 'Bin'
2368 count = 'Bin'
2364 else:
2369 else:
2365 count = adds + removes
2370 count = adds + removes
2366 pluses = '+' * scale(adds)
2371 pluses = '+' * scale(adds)
2367 minuses = '-' * scale(removes)
2372 minuses = '-' * scale(removes)
2368 output.append(' %s%s | %*s %s%s\n' %
2373 output.append(' %s%s | %*s %s%s\n' %
2369 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2374 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2370 countwidth, count, pluses, minuses))
2375 countwidth, count, pluses, minuses))
2371
2376
2372 if stats:
2377 if stats:
2373 output.append(_(' %d files changed, %d insertions(+), '
2378 output.append(_(' %d files changed, %d insertions(+), '
2374 '%d deletions(-)\n')
2379 '%d deletions(-)\n')
2375 % (len(stats), totaladds, totalremoves))
2380 % (len(stats), totaladds, totalremoves))
2376
2381
2377 return ''.join(output)
2382 return ''.join(output)
2378
2383
2379 def diffstatui(*args, **kw):
2384 def diffstatui(*args, **kw):
2380 '''like diffstat(), but yields 2-tuples of (output, label) for
2385 '''like diffstat(), but yields 2-tuples of (output, label) for
2381 ui.write()
2386 ui.write()
2382 '''
2387 '''
2383
2388
2384 for line in diffstat(*args, **kw).splitlines():
2389 for line in diffstat(*args, **kw).splitlines():
2385 if line and line[-1] in '+-':
2390 if line and line[-1] in '+-':
2386 name, graph = line.rsplit(' ', 1)
2391 name, graph = line.rsplit(' ', 1)
2387 yield (name + ' ', '')
2392 yield (name + ' ', '')
2388 m = re.search(r'\++', graph)
2393 m = re.search(r'\++', graph)
2389 if m:
2394 if m:
2390 yield (m.group(0), 'diffstat.inserted')
2395 yield (m.group(0), 'diffstat.inserted')
2391 m = re.search(r'-+', graph)
2396 m = re.search(r'-+', graph)
2392 if m:
2397 if m:
2393 yield (m.group(0), 'diffstat.deleted')
2398 yield (m.group(0), 'diffstat.deleted')
2394 else:
2399 else:
2395 yield (line, '')
2400 yield (line, '')
2396 yield ('\n', '')
2401 yield ('\n', '')