##// END OF EJS Templates
diff: pass a diff hunks filter function from changeset_printer to patch.diff()...
Denis Laxalde -
r34857:890afefa default
parent child Browse files
Show More
@@ -1,3876 +1,3881 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import itertools
11 import itertools
12 import os
12 import os
13 import re
13 import re
14 import tempfile
14 import tempfile
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 hex,
18 hex,
19 nullid,
19 nullid,
20 nullrev,
20 nullrev,
21 short,
21 short,
22 )
22 )
23
23
24 from . import (
24 from . import (
25 bookmarks,
25 bookmarks,
26 changelog,
26 changelog,
27 copies,
27 copies,
28 crecord as crecordmod,
28 crecord as crecordmod,
29 dirstateguard,
29 dirstateguard,
30 encoding,
30 encoding,
31 error,
31 error,
32 formatter,
32 formatter,
33 graphmod,
33 graphmod,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 patch,
36 patch,
37 pathutil,
37 pathutil,
38 pycompat,
38 pycompat,
39 registrar,
39 registrar,
40 revlog,
40 revlog,
41 revset,
41 revset,
42 scmutil,
42 scmutil,
43 smartset,
43 smartset,
44 templatekw,
44 templatekw,
45 templater,
45 templater,
46 util,
46 util,
47 vfs as vfsmod,
47 vfs as vfsmod,
48 )
48 )
stringio = util.stringio

# Templates of common command options, shared by many commands.
# Each entry is a fancyopts tuple:
#   (shortname, longname, default, helptext[, metavar])

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

# whitespace-handling options shared between diff-like commands
diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"
163
163
def ishunk(x):
    """Return True if x is a record or crecord hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
167
167
def newandmodified(chunks, originalchunks):
    """Return the set of filenames for hunks that create a new file and
    are not part of originalchunks (i.e. were modified while recording)."""
    return set(c.header.filename()
               for c in chunks
               if ishunk(c) and c.header.isnewfile()
               and c not in originalchunks)
175
175
def parsealiases(cmd):
    """Split a command spec like "^commit|ci" into its list of alias
    names, dropping any leading "^" markers."""
    names = cmd.lstrip("^")
    return names.split("|")
178
178
def setupwrapcolorwrite(ui):
    """Replace ui.write with a wrapper that labels diff output so it can
    be colorized; return the original write method so the caller can
    restore it afterwards."""
    oldwrite = ui.write

    def labeledwrite(*args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + l)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
191
191
def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    """Let the user filter originalhunks, using the curses interface when
    enabled (optionally driven by a test script), else text prompts."""
    if not usecurses:
        return patch.filterpatch(ui, originalhunks, operation)

    if testfile:
        # scripted selection, used by the test suite
        chunkselector = crecordmod.testdecorator(
            testfile, crecordmod.testchunkselector)
    else:
        chunkselector = crecordmod.chunkselector
    return crecordmod.filterpatch(ui, originalhunks, chunkselector, operation)
204
204
def recordfilter(ui, originalhunks, operation=None):
    """Prompt the user to filter originalhunks; return (chunks, newopts).

    *operation* is used to build ui messages indicating what kind of
    filtering the user is doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    withcurses = crecordmod.checkcurses(ui)
    scriptfile = ui.config('experimental', 'crecordtest')
    # color-label diff output while the interactive session runs
    restorewrite = setupwrapcolorwrite(ui)
    try:
        chunks, newopts = filterchunks(
            ui, originalhunks, withcurses, scriptfile, operation)
    finally:
        ui.write = restorewrite
    return chunks, newopts
221
221
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
            filterfn, *pats, **opts):
    """Drive an interactive (record-style) commit.

    commitfunc is invoked as ``commitfunc(ui, repo, *newfiles, **opts)``
    once the working directory holds only the selected changes.
    cmdsuggest names a command to suggest when the ui is not interactive.
    backupall, when true, backs up every changed file rather than only the
    files touched by the selected hunks.  filterfn is called as
    ``filterfn(ui, originalchunks)`` and must return (chunks, newopts)
    (e.g. recordfilter above).
    """
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    # interactive selection is impossible without a terminal
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        # partial commit of a merge is not supported: hunk selection
        # cannot express a merge's two-parent state
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            # collect explicitly-matched directories and abort on bad files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        # generate a git-style, date-less diff with function context so
        # the hunks can be parsed and presented for selection
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        # every file touched by any selected chunk; headers have .files(),
        # other chunk types may not, hence the AttributeError guard
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # the directory may survive a previous run; anything else
                # is a real error
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into one patch text
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # new files must be removed so the revert below produces a
            # truly clean working copy to apply the filtered patch onto
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; a failure to remove backups must not
                # mask the commit result
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # take the working-copy lock around the whole record operation
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
404
404
405
405
# OS-specific path separator, hoisted to module level because it is needed
# every time a file is added to a dirnode object below
pathsep = pycompat.ossep
409
409
class dirnode(object):
    """
    Represent a directory in the user working copy with information required
    for the purpose of tersing its status.

    path is the path to the directory.

    statuses is a set of statuses of all files in this directory (this
    includes all the files in all the subdirectories too).

    files is a list of files which are direct children of this directory.

    subdirs is a dictionary with the sub-directory name as the key and its
    own dirnode object as the value.
    """

    def __init__(self, dirpath):
        self.path = dirpath
        # set() instead of the unidiomatic set([])
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not a direct child of this directory, we traverse to
        the directory of which this file is a direct child and add the file
        there.
        """

        # a path separator in the filename means it is not a direct child
        # of this directory
        if pathsep in filename:
            subdir, filep = filename.split(pathsep, 1)

            # create the dirnode object for subdir if it does not exist yet
            if subdir not in self.subdirs:
                subdirpath = os.path.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # delegate the file to the subdir's dirnode
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        # set.add is idempotent, so no membership test is needed
        self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, os.path.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to
        terse that status. -> yield (status, dirpath)

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in
                this directory (only the ones in this directory, not the
                subdirs)

                b) Recurse the function on all the subdirectories of this
                directory
        """

        if len(self.statuses) == 1:
            # NOTE: pop() empties statuses; each node is walked only once
            # during tersedir, so the destructive read is harmless there
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
509
509
def tersedir(statuslist, terseargs):
    """Terse the status if all the files in a directory share the same status.

    statuslist is a scmutil.status() object holding one list of files per
    status; terseargs is the string the user passed to the `--terse` flag.

    Builds a tree of dirnode objects; each node records the information
    needed to decide whether its directory can be tersed.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # reject any status abbreviation we do not know about
    for char in terseargs:
        if char not in allst:
            raise error.Abort(_("'%s' not recognized") % char)

    # dirnode object for the root of the repository
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        tersedict[attrname[0]] = []
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, attrname[0])

    # the root directory itself is never tersed, so list its files directly
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # walk every subdirectory, tersing where possible
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    # emit the per-status lists in the canonical order
    return [sorted(tersedict[st]) for st in allst]
557
557
def _commentlines(raw):
    '''Surround lines with a comment char and a new line'''
    # splitlines() drops the trailing newline, so the '# ' prefix is applied
    # once per logical line; the result always ends with a newline.
    lines = raw.splitlines()
    commentedlines = ['# %s' % line for line in lines]
    return '\n'.join(commentedlines) + '\n'
563
563
def _conflictsmsg(repo):
    """Return a commented summary of unresolved merge conflicts, if any.

    Returns None when no merge is in progress.
    """
    # avoid merge cycle
    from . import merge as mergemod
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if not unresolvedlist:
        msg = _('No unresolved merge conflicts.')
    else:
        cwd = pycompat.getcwd()
        # show paths relative to the current directory, indented one space
        relpaths = [' %s' % os.path.relpath(os.path.join(repo.root, path), cwd)
                    for path in unresolvedlist]
        mergeliststr = '\n'.join(relpaths)
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    return _commentlines(msg)
587
587
def _helpmessage(continuecmd, abortcmd):
    """Build the standard continue/abort hint as commented lines."""
    return _commentlines(
        _('To continue: %s\n'
          'To abort: %s') % (continuecmd, abortcmd))
592
592
def _rebasemsg():
    """Hint shown while a rebase is unfinished."""
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')
595
595
def _histeditmsg():
    """Hint shown while a histedit is unfinished."""
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')
598
598
def _unshelvemsg():
    """Hint shown while an unshelve is unfinished."""
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')
601
601
def _updatecleanmsg(dest=None):
    """Return the 'hg update --clean' command plus a discard warning."""
    target = dest or '.'
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (target, warning)
605
605
def _graftmsg():
    """Hint shown while a graft is unfinished."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())
609
609
def _mergemsg():
    """Hint shown while a merge awaits commit."""
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())
613
613
def _bisectmsg():
    """Hint shown while a bisection is in progress."""
    return _commentlines(
        _('To mark the changeset good: hg bisect --good\n'
          'To mark the changeset bad: hg bisect --bad\n'
          'To abort: hg bisect --reset\n'))
619
619
def fileexistspredicate(filename):
    """Return a predicate reporting whether repo.vfs contains filename."""
    def predicate(repo):
        return repo.vfs.exists(filename)
    return predicate
622
622
def _mergepredicate(repo):
    """True when the working directory has two parents (merge in flight)."""
    parents = repo[None].parents()
    return len(parents) > 1
625
625
# Ordered list of (state name, detection predicate, hint builder) tuples
# consulted by _getrepostate; the first predicate that fires wins.
STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)
639
639
def _getrepostate(repo):
    """Return the first matching (state, predicate, msgfn) from STATES.

    States named in the commands.status.skipstates config are ignored;
    returns None when no unfinished state is detected.
    """
    # experimental config: commands.status.skipstates
    skipped = set(repo.ui.configlist('commands', 'status.skipstates'))
    for statename, detect, msgfn in STATES:
        # skipped states are not even probed
        if statename in skipped or not detect(repo):
            continue
        return (statename, detect, msgfn)
648
648
def morestatus(repo, fm):
    """Write extra status output: unfinished state, conflicts and hints.

    Writes nothing when the repository is not in an unfinished state.
    """
    statetuple = _getrepostate(repo)
    if not statetuple:
        return
    label = 'status.morestatus'
    fm.startitem()
    state, statedetectionpredicate, helpfulmsg = statetuple
    statemsg = _('The repository is in an unfinished *%s* state.') % state
    fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
    conmsg = _conflictsmsg(repo)
    if conmsg:
        fm.write('conflictsmsg', '%s\n', conmsg, label=label)
    if helpfulmsg:
        fm.write('helpmsg', '%s\n', helpfulmsg(), label=label)
663
663
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        candidates = [cmd]
    else:
        candidates = table.keys()

    allcmds = []
    for entry in candidates:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        if cmd in aliases:
            found = cmd
        elif not strict:
            # accept any alias that cmd is a prefix of
            found = next((alias for alias in aliases
                          if alias.startswith(cmd)), None)
        else:
            found = None
        if found is None:
            continue
        # bucket debug commands separately so they only win when nothing
        # else matched
        if aliases[0].startswith("debug") or found.startswith("debug"):
            debugchoice[found] = (aliases, table[entry])
        else:
            choice[found] = (aliases, table[entry])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
701
701
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    matches, allcmds = findpossible(cmd, table, strict)

    # an exact match wins outright
    try:
        return matches[cmd]
    except KeyError:
        pass

    if len(matches) > 1:
        raise error.AmbiguousCommand(cmd, sorted(matches))
    if matches:
        return next(iter(matches.values()))

    raise error.UnknownCommand(cmd, allcmds)
717
717
def findrepo(p):
    """Walk upwards from p looking for a directory containing '.hg'.

    Returns the repository root, or None once the filesystem root is
    reached without finding one.
    """
    current = p
    while not os.path.isdir(os.path.join(current, ".hg")):
        parent = os.path.dirname(current)
        if parent == current:
            # dirname() is a fixed point only at the filesystem root
            return None
        current = parent
    return current
725
725
def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    # any of modified/added/removed/deleted makes the tree dirty
    if any(repo.status()[:4]):
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    # subrepos must be clean as well
    for subname in sorted(ctx.substate):
        ctx.sub(subname).bailifchanged(hint=hint)
743
743
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if logfile and not message:
        try:
            if isstdiofilename(logfile):
                # '-' means: read the message from stdin
                message = ui.fin.read()
            else:
                # normalize line endings while reading the file
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as err:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(err.strerror)))
    return message
762
762
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        merging = len(ctxorbool.parents()) > 1
    suffix = ".merge" if merging else ".normal"
    return baseformname + suffix
779
779
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is called with the edited commit message (= 'description'
    of the new changeset) just after editing and before the empty-ness
    check; whatever it returns is stored into history, which allows
    changing the description before storing.

    'extramsg' replaces the 'Leave message empty to abort commit' line in
    the editor; the 'HG: ' prefix and EOL are added automatically.

    'editform' is a dot-separated list of names, to distinguish the
    purpose of commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever one of
    'finishdesc' or 'extramsg' is specified, because they are specific
    for usage in MQ.
    """
    if not (edit or finishdesc or extramsg):
        if editform:
            return lambda r, c, s: commiteditor(r, c, s, editform=editform)
        return commiteditor
    return lambda r, c, s: commitforceeditor(r, c, s,
                                             finishdesc=finishdesc,
                                             extramsg=extramsg,
                                             editform=editform)
810
810
def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if not limit:
        # absent, empty or zero all mean: no limit
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise error.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise error.Abort(_('limit must be positive'))
    return limit
824
824
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand %-escapes in an output filename pattern.

    Always available: %% (literal percent) and %b (repo basename).
    When the corresponding argument is given, also: %H/%h/%R/%r/%m
    (node-derived), %N/%n (total/sequence number), %s/%d/%p
    (pathname-derived).  Unknown escapes abort.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    if node:
        expander.update({
            'H': lambda: hex(node),
            'R': lambda: str(repo.changelog.rev(node)),
            'h': lambda: short(node),
            'm': lambda: re.sub('[^\w]', '_', str(desc)),
            'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth or 0),
        })
    if total is not None:
        expander['N'] = lambda: str(total)
    if seqno is not None:
        expander['n'] = lambda: str(seqno)
    if total is not None and seqno is not None:
        # pad the sequence number to the width of the total
        expander['n'] = lambda: str(seqno).zfill(len(str(total)))
    if pathname is not None:
        expander['s'] = lambda: os.path.basename(pathname)
        expander['d'] = lambda: os.path.dirname(pathname) or '.'
        expander['p'] = lambda: pathname

    try:
        newname = []
        i = 0
        patlen = len(pat)
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                # consume the escape character following '%'
                i += 1
                c = expander[pat[i:i + 1]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename")
                          % inst.args[0])
870
870
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == '-'
874
874
class _unclosablefile(object):
    """Proxy around a file object whose close() is a no-op.

    Everything except close()/__iter__ and the context-manager protocol
    is delegated to the wrapped file.
    """

    def __init__(self, fp):
        self._fp = fp

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # do not close on context exit either
        pass

    def __iter__(self):
        return iter(self._fp)

    def __getattr__(self, attr):
        # delegate everything else to the wrapped file
        return getattr(self._fp, attr)

    def close(self):
        # deliberately ignored: the wrapped stream outlives this proxy
        pass
893
893
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open the file named by expanding pat (see makefilename).

    An empty pat or '-' maps to the ui's stdin/stdout, wrapped so that
    close() is a no-op.  When modemap is given it overrides, then records,
    the mode per filename so that a second open of the same name appends.
    """
    writable = mode not in ('r', 'rb')

    if isstdiofilename(pat):
        stream = repo.ui.fout if writable else repo.ui.fin
        return _unclosablefile(stream)

    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first writer truncates, subsequent writers append
            modemap[fn] = 'ab'
    return open(fn, mode)
912
912
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    dir = opts['dir']
    msg = None
    # --changelog/--manifest/--dir are mutually exclusive, need a
    # repository, and forbid an explicit filename
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _('cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest or --dir '
                    'without a repository')
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            # per-directory manifest logs only exist with treemanifests
            if 'treemanifest' not in repo.requirements:
                raise error.Abort(_("--dir can only be used on repos with "
                                    "treemanifest enabled"))
            dirlog = repo.manifestlog._revlog.dirlog(dir)
            # an empty dirlog means the directory has no history; fall
            # through to the filename-based handling below
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog._revlog
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        # nothing matched in the repo: open the named revlog file from the
        # current directory directly (works without a repository too)
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
957
957
958 def copy(ui, repo, pats, opts, rename=False):
958 def copy(ui, repo, pats, opts, rename=False):
959 # called with the repo lock held
959 # called with the repo lock held
960 #
960 #
961 # hgsep => pathname that uses "/" to separate directories
961 # hgsep => pathname that uses "/" to separate directories
962 # ossep => pathname that uses os.sep to separate directories
962 # ossep => pathname that uses os.sep to separate directories
963 cwd = repo.getcwd()
963 cwd = repo.getcwd()
964 targets = {}
964 targets = {}
965 after = opts.get("after")
965 after = opts.get("after")
966 dryrun = opts.get("dry_run")
966 dryrun = opts.get("dry_run")
967 wctx = repo[None]
967 wctx = repo[None]
968
968
969 def walkpat(pat):
969 def walkpat(pat):
970 srcs = []
970 srcs = []
971 if after:
971 if after:
972 badstates = '?'
972 badstates = '?'
973 else:
973 else:
974 badstates = '?r'
974 badstates = '?r'
975 m = scmutil.match(wctx, [pat], opts, globbed=True)
975 m = scmutil.match(wctx, [pat], opts, globbed=True)
976 for abs in wctx.walk(m):
976 for abs in wctx.walk(m):
977 state = repo.dirstate[abs]
977 state = repo.dirstate[abs]
978 rel = m.rel(abs)
978 rel = m.rel(abs)
979 exact = m.exact(abs)
979 exact = m.exact(abs)
980 if state in badstates:
980 if state in badstates:
981 if exact and state == '?':
981 if exact and state == '?':
982 ui.warn(_('%s: not copying - file is not managed\n') % rel)
982 ui.warn(_('%s: not copying - file is not managed\n') % rel)
983 if exact and state == 'r':
983 if exact and state == 'r':
984 ui.warn(_('%s: not copying - file has been marked for'
984 ui.warn(_('%s: not copying - file has been marked for'
985 ' remove\n') % rel)
985 ' remove\n') % rel)
986 continue
986 continue
987 # abs: hgsep
987 # abs: hgsep
988 # rel: ossep
988 # rel: ossep
989 srcs.append((abs, rel, exact))
989 srcs.append((abs, rel, exact))
990 return srcs
990 return srcs
991
991
992 # abssrc: hgsep
992 # abssrc: hgsep
993 # relsrc: ossep
993 # relsrc: ossep
994 # otarget: ossep
994 # otarget: ossep
995 def copyfile(abssrc, relsrc, otarget, exact):
995 def copyfile(abssrc, relsrc, otarget, exact):
996 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
996 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
997 if '/' in abstarget:
997 if '/' in abstarget:
998 # We cannot normalize abstarget itself, this would prevent
998 # We cannot normalize abstarget itself, this would prevent
999 # case only renames, like a => A.
999 # case only renames, like a => A.
1000 abspath, absname = abstarget.rsplit('/', 1)
1000 abspath, absname = abstarget.rsplit('/', 1)
1001 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
1001 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
1002 reltarget = repo.pathto(abstarget, cwd)
1002 reltarget = repo.pathto(abstarget, cwd)
1003 target = repo.wjoin(abstarget)
1003 target = repo.wjoin(abstarget)
1004 src = repo.wjoin(abssrc)
1004 src = repo.wjoin(abssrc)
1005 state = repo.dirstate[abstarget]
1005 state = repo.dirstate[abstarget]
1006
1006
1007 scmutil.checkportable(ui, abstarget)
1007 scmutil.checkportable(ui, abstarget)
1008
1008
1009 # check for collisions
1009 # check for collisions
1010 prevsrc = targets.get(abstarget)
1010 prevsrc = targets.get(abstarget)
1011 if prevsrc is not None:
1011 if prevsrc is not None:
1012 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1012 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1013 (reltarget, repo.pathto(abssrc, cwd),
1013 (reltarget, repo.pathto(abssrc, cwd),
1014 repo.pathto(prevsrc, cwd)))
1014 repo.pathto(prevsrc, cwd)))
1015 return
1015 return
1016
1016
1017 # check for overwrites
1017 # check for overwrites
1018 exists = os.path.lexists(target)
1018 exists = os.path.lexists(target)
1019 samefile = False
1019 samefile = False
1020 if exists and abssrc != abstarget:
1020 if exists and abssrc != abstarget:
1021 if (repo.dirstate.normalize(abssrc) ==
1021 if (repo.dirstate.normalize(abssrc) ==
1022 repo.dirstate.normalize(abstarget)):
1022 repo.dirstate.normalize(abstarget)):
1023 if not rename:
1023 if not rename:
1024 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1024 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1025 return
1025 return
1026 exists = False
1026 exists = False
1027 samefile = True
1027 samefile = True
1028
1028
1029 if not after and exists or after and state in 'mn':
1029 if not after and exists or after and state in 'mn':
1030 if not opts['force']:
1030 if not opts['force']:
1031 if state in 'mn':
1031 if state in 'mn':
1032 msg = _('%s: not overwriting - file already committed\n')
1032 msg = _('%s: not overwriting - file already committed\n')
1033 if after:
1033 if after:
1034 flags = '--after --force'
1034 flags = '--after --force'
1035 else:
1035 else:
1036 flags = '--force'
1036 flags = '--force'
1037 if rename:
1037 if rename:
1038 hint = _('(hg rename %s to replace the file by '
1038 hint = _('(hg rename %s to replace the file by '
1039 'recording a rename)\n') % flags
1039 'recording a rename)\n') % flags
1040 else:
1040 else:
1041 hint = _('(hg copy %s to replace the file by '
1041 hint = _('(hg copy %s to replace the file by '
1042 'recording a copy)\n') % flags
1042 'recording a copy)\n') % flags
1043 else:
1043 else:
1044 msg = _('%s: not overwriting - file exists\n')
1044 msg = _('%s: not overwriting - file exists\n')
1045 if rename:
1045 if rename:
1046 hint = _('(hg rename --after to record the rename)\n')
1046 hint = _('(hg rename --after to record the rename)\n')
1047 else:
1047 else:
1048 hint = _('(hg copy --after to record the copy)\n')
1048 hint = _('(hg copy --after to record the copy)\n')
1049 ui.warn(msg % reltarget)
1049 ui.warn(msg % reltarget)
1050 ui.warn(hint)
1050 ui.warn(hint)
1051 return
1051 return
1052
1052
1053 if after:
1053 if after:
1054 if not exists:
1054 if not exists:
1055 if rename:
1055 if rename:
1056 ui.warn(_('%s: not recording move - %s does not exist\n') %
1056 ui.warn(_('%s: not recording move - %s does not exist\n') %
1057 (relsrc, reltarget))
1057 (relsrc, reltarget))
1058 else:
1058 else:
1059 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1059 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1060 (relsrc, reltarget))
1060 (relsrc, reltarget))
1061 return
1061 return
1062 elif not dryrun:
1062 elif not dryrun:
1063 try:
1063 try:
1064 if exists:
1064 if exists:
1065 os.unlink(target)
1065 os.unlink(target)
1066 targetdir = os.path.dirname(target) or '.'
1066 targetdir = os.path.dirname(target) or '.'
1067 if not os.path.isdir(targetdir):
1067 if not os.path.isdir(targetdir):
1068 os.makedirs(targetdir)
1068 os.makedirs(targetdir)
1069 if samefile:
1069 if samefile:
1070 tmp = target + "~hgrename"
1070 tmp = target + "~hgrename"
1071 os.rename(src, tmp)
1071 os.rename(src, tmp)
1072 os.rename(tmp, target)
1072 os.rename(tmp, target)
1073 else:
1073 else:
1074 util.copyfile(src, target)
1074 util.copyfile(src, target)
1075 srcexists = True
1075 srcexists = True
1076 except IOError as inst:
1076 except IOError as inst:
1077 if inst.errno == errno.ENOENT:
1077 if inst.errno == errno.ENOENT:
1078 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1078 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1079 srcexists = False
1079 srcexists = False
1080 else:
1080 else:
1081 ui.warn(_('%s: cannot copy - %s\n') %
1081 ui.warn(_('%s: cannot copy - %s\n') %
1082 (relsrc, encoding.strtolocal(inst.strerror)))
1082 (relsrc, encoding.strtolocal(inst.strerror)))
1083 return True # report a failure
1083 return True # report a failure
1084
1084
1085 if ui.verbose or not exact:
1085 if ui.verbose or not exact:
1086 if rename:
1086 if rename:
1087 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1087 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1088 else:
1088 else:
1089 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1089 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1090
1090
1091 targets[abstarget] = abssrc
1091 targets[abstarget] = abssrc
1092
1092
1093 # fix up dirstate
1093 # fix up dirstate
1094 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1094 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1095 dryrun=dryrun, cwd=cwd)
1095 dryrun=dryrun, cwd=cwd)
1096 if rename and not dryrun:
1096 if rename and not dryrun:
1097 if not after and srcexists and not samefile:
1097 if not after and srcexists and not samefile:
1098 repo.wvfs.unlinkpath(abssrc)
1098 repo.wvfs.unlinkpath(abssrc)
1099 wctx.forget([abssrc])
1099 wctx.forget([abssrc])
1100
1100
1101 # pat: ossep
1101 # pat: ossep
1102 # dest ossep
1102 # dest ossep
1103 # srcs: list of (hgsep, hgsep, ossep, bool)
1103 # srcs: list of (hgsep, hgsep, ossep, bool)
1104 # return: function that takes hgsep and returns ossep
1104 # return: function that takes hgsep and returns ossep
1105 def targetpathfn(pat, dest, srcs):
1105 def targetpathfn(pat, dest, srcs):
1106 if os.path.isdir(pat):
1106 if os.path.isdir(pat):
1107 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1107 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1108 abspfx = util.localpath(abspfx)
1108 abspfx = util.localpath(abspfx)
1109 if destdirexists:
1109 if destdirexists:
1110 striplen = len(os.path.split(abspfx)[0])
1110 striplen = len(os.path.split(abspfx)[0])
1111 else:
1111 else:
1112 striplen = len(abspfx)
1112 striplen = len(abspfx)
1113 if striplen:
1113 if striplen:
1114 striplen += len(pycompat.ossep)
1114 striplen += len(pycompat.ossep)
1115 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1115 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1116 elif destdirexists:
1116 elif destdirexists:
1117 res = lambda p: os.path.join(dest,
1117 res = lambda p: os.path.join(dest,
1118 os.path.basename(util.localpath(p)))
1118 os.path.basename(util.localpath(p)))
1119 else:
1119 else:
1120 res = lambda p: dest
1120 res = lambda p: dest
1121 return res
1121 return res
1122
1122
1123 # pat: ossep
1123 # pat: ossep
1124 # dest ossep
1124 # dest ossep
1125 # srcs: list of (hgsep, hgsep, ossep, bool)
1125 # srcs: list of (hgsep, hgsep, ossep, bool)
1126 # return: function that takes hgsep and returns ossep
1126 # return: function that takes hgsep and returns ossep
1127 def targetpathafterfn(pat, dest, srcs):
1127 def targetpathafterfn(pat, dest, srcs):
1128 if matchmod.patkind(pat):
1128 if matchmod.patkind(pat):
1129 # a mercurial pattern
1129 # a mercurial pattern
1130 res = lambda p: os.path.join(dest,
1130 res = lambda p: os.path.join(dest,
1131 os.path.basename(util.localpath(p)))
1131 os.path.basename(util.localpath(p)))
1132 else:
1132 else:
1133 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1133 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1134 if len(abspfx) < len(srcs[0][0]):
1134 if len(abspfx) < len(srcs[0][0]):
1135 # A directory. Either the target path contains the last
1135 # A directory. Either the target path contains the last
1136 # component of the source path or it does not.
1136 # component of the source path or it does not.
1137 def evalpath(striplen):
1137 def evalpath(striplen):
1138 score = 0
1138 score = 0
1139 for s in srcs:
1139 for s in srcs:
1140 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1140 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1141 if os.path.lexists(t):
1141 if os.path.lexists(t):
1142 score += 1
1142 score += 1
1143 return score
1143 return score
1144
1144
1145 abspfx = util.localpath(abspfx)
1145 abspfx = util.localpath(abspfx)
1146 striplen = len(abspfx)
1146 striplen = len(abspfx)
1147 if striplen:
1147 if striplen:
1148 striplen += len(pycompat.ossep)
1148 striplen += len(pycompat.ossep)
1149 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1149 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1150 score = evalpath(striplen)
1150 score = evalpath(striplen)
1151 striplen1 = len(os.path.split(abspfx)[0])
1151 striplen1 = len(os.path.split(abspfx)[0])
1152 if striplen1:
1152 if striplen1:
1153 striplen1 += len(pycompat.ossep)
1153 striplen1 += len(pycompat.ossep)
1154 if evalpath(striplen1) > score:
1154 if evalpath(striplen1) > score:
1155 striplen = striplen1
1155 striplen = striplen1
1156 res = lambda p: os.path.join(dest,
1156 res = lambda p: os.path.join(dest,
1157 util.localpath(p)[striplen:])
1157 util.localpath(p)[striplen:])
1158 else:
1158 else:
1159 # a file
1159 # a file
1160 if destdirexists:
1160 if destdirexists:
1161 res = lambda p: os.path.join(dest,
1161 res = lambda p: os.path.join(dest,
1162 os.path.basename(util.localpath(p)))
1162 os.path.basename(util.localpath(p)))
1163 else:
1163 else:
1164 res = lambda p: dest
1164 res = lambda p: dest
1165 return res
1165 return res
1166
1166
1167 pats = scmutil.expandpats(pats)
1167 pats = scmutil.expandpats(pats)
1168 if not pats:
1168 if not pats:
1169 raise error.Abort(_('no source or destination specified'))
1169 raise error.Abort(_('no source or destination specified'))
1170 if len(pats) == 1:
1170 if len(pats) == 1:
1171 raise error.Abort(_('no destination specified'))
1171 raise error.Abort(_('no destination specified'))
1172 dest = pats.pop()
1172 dest = pats.pop()
1173 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1173 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1174 if not destdirexists:
1174 if not destdirexists:
1175 if len(pats) > 1 or matchmod.patkind(pats[0]):
1175 if len(pats) > 1 or matchmod.patkind(pats[0]):
1176 raise error.Abort(_('with multiple sources, destination must be an '
1176 raise error.Abort(_('with multiple sources, destination must be an '
1177 'existing directory'))
1177 'existing directory'))
1178 if util.endswithsep(dest):
1178 if util.endswithsep(dest):
1179 raise error.Abort(_('destination %s is not a directory') % dest)
1179 raise error.Abort(_('destination %s is not a directory') % dest)
1180
1180
1181 tfn = targetpathfn
1181 tfn = targetpathfn
1182 if after:
1182 if after:
1183 tfn = targetpathafterfn
1183 tfn = targetpathafterfn
1184 copylist = []
1184 copylist = []
1185 for pat in pats:
1185 for pat in pats:
1186 srcs = walkpat(pat)
1186 srcs = walkpat(pat)
1187 if not srcs:
1187 if not srcs:
1188 continue
1188 continue
1189 copylist.append((tfn(pat, dest, srcs), srcs))
1189 copylist.append((tfn(pat, dest, srcs), srcs))
1190 if not copylist:
1190 if not copylist:
1191 raise error.Abort(_('no files to copy'))
1191 raise error.Abort(_('no files to copy'))
1192
1192
1193 errors = 0
1193 errors = 0
1194 for targetpath, srcs in copylist:
1194 for targetpath, srcs in copylist:
1195 for abssrc, relsrc, exact in srcs:
1195 for abssrc, relsrc, exact in srcs:
1196 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1196 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1197 errors += 1
1197 errors += 1
1198
1198
1199 if errors:
1199 if errors:
1200 ui.warn(_('(consider using --after)\n'))
1200 ui.warn(_('(consider using --after)\n'))
1201
1201
1202 return errors != 0
1202 return errors != 0
1203
1203
1204 ## facility to let extension process additional data into an import patch
1204 ## facility to let extension process additional data into an import patch
1205 # list of identifier to be executed in order
1205 # list of identifier to be executed in order
1206 extrapreimport = [] # run before commit
1206 extrapreimport = [] # run before commit
1207 extrapostimport = [] # run after commit
1207 extrapostimport = [] # run after commit
1208 # mapping from identifier to actual import function
1208 # mapping from identifier to actual import function
1209 #
1209 #
1210 # 'preimport' are run before the commit is made and are provided the following
1210 # 'preimport' are run before the commit is made and are provided the following
1211 # arguments:
1211 # arguments:
1212 # - repo: the localrepository instance,
1212 # - repo: the localrepository instance,
1213 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1213 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1214 # - extra: the future extra dictionary of the changeset, please mutate it,
1214 # - extra: the future extra dictionary of the changeset, please mutate it,
1215 # - opts: the import options.
1215 # - opts: the import options.
1216 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
1216 # XXX ideally, we would just pass an ctx ready to be computed, that would allow
1217 # mutation of in memory commit and more. Feel free to rework the code to get
1217 # mutation of in memory commit and more. Feel free to rework the code to get
1218 # there.
1218 # there.
1219 extrapreimportmap = {}
1219 extrapreimportmap = {}
1220 # 'postimport' are run after the commit is made and are provided the following
1220 # 'postimport' are run after the commit is made and are provided the following
1221 # argument:
1221 # argument:
1222 # - ctx: the changectx created by import.
1222 # - ctx: the changectx created by import.
1223 extrapostimportmap = {}
1223 extrapostimportmap = {}
1224
1224
1225 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1225 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1226 """Utility function used by commands.import to import a single patch
1226 """Utility function used by commands.import to import a single patch
1227
1227
1228 This function is explicitly defined here to help the evolve extension to
1228 This function is explicitly defined here to help the evolve extension to
1229 wrap this part of the import logic.
1229 wrap this part of the import logic.
1230
1230
1231 The API is currently a bit ugly because it a simple code translation from
1231 The API is currently a bit ugly because it a simple code translation from
1232 the import command. Feel free to make it better.
1232 the import command. Feel free to make it better.
1233
1233
1234 :hunk: a patch (as a binary string)
1234 :hunk: a patch (as a binary string)
1235 :parents: nodes that will be parent of the created commit
1235 :parents: nodes that will be parent of the created commit
1236 :opts: the full dict of option passed to the import command
1236 :opts: the full dict of option passed to the import command
1237 :msgs: list to save commit message to.
1237 :msgs: list to save commit message to.
1238 (used in case we need to save it when failing)
1238 (used in case we need to save it when failing)
1239 :updatefunc: a function that update a repo to a given node
1239 :updatefunc: a function that update a repo to a given node
1240 updatefunc(<repo>, <node>)
1240 updatefunc(<repo>, <node>)
1241 """
1241 """
1242 # avoid cycle context -> subrepo -> cmdutil
1242 # avoid cycle context -> subrepo -> cmdutil
1243 from . import context
1243 from . import context
1244 extractdata = patch.extract(ui, hunk)
1244 extractdata = patch.extract(ui, hunk)
1245 tmpname = extractdata.get('filename')
1245 tmpname = extractdata.get('filename')
1246 message = extractdata.get('message')
1246 message = extractdata.get('message')
1247 user = opts.get('user') or extractdata.get('user')
1247 user = opts.get('user') or extractdata.get('user')
1248 date = opts.get('date') or extractdata.get('date')
1248 date = opts.get('date') or extractdata.get('date')
1249 branch = extractdata.get('branch')
1249 branch = extractdata.get('branch')
1250 nodeid = extractdata.get('nodeid')
1250 nodeid = extractdata.get('nodeid')
1251 p1 = extractdata.get('p1')
1251 p1 = extractdata.get('p1')
1252 p2 = extractdata.get('p2')
1252 p2 = extractdata.get('p2')
1253
1253
1254 nocommit = opts.get('no_commit')
1254 nocommit = opts.get('no_commit')
1255 importbranch = opts.get('import_branch')
1255 importbranch = opts.get('import_branch')
1256 update = not opts.get('bypass')
1256 update = not opts.get('bypass')
1257 strip = opts["strip"]
1257 strip = opts["strip"]
1258 prefix = opts["prefix"]
1258 prefix = opts["prefix"]
1259 sim = float(opts.get('similarity') or 0)
1259 sim = float(opts.get('similarity') or 0)
1260 if not tmpname:
1260 if not tmpname:
1261 return (None, None, False)
1261 return (None, None, False)
1262
1262
1263 rejects = False
1263 rejects = False
1264
1264
1265 try:
1265 try:
1266 cmdline_message = logmessage(ui, opts)
1266 cmdline_message = logmessage(ui, opts)
1267 if cmdline_message:
1267 if cmdline_message:
1268 # pickup the cmdline msg
1268 # pickup the cmdline msg
1269 message = cmdline_message
1269 message = cmdline_message
1270 elif message:
1270 elif message:
1271 # pickup the patch msg
1271 # pickup the patch msg
1272 message = message.strip()
1272 message = message.strip()
1273 else:
1273 else:
1274 # launch the editor
1274 # launch the editor
1275 message = None
1275 message = None
1276 ui.debug('message:\n%s\n' % message)
1276 ui.debug('message:\n%s\n' % message)
1277
1277
1278 if len(parents) == 1:
1278 if len(parents) == 1:
1279 parents.append(repo[nullid])
1279 parents.append(repo[nullid])
1280 if opts.get('exact'):
1280 if opts.get('exact'):
1281 if not nodeid or not p1:
1281 if not nodeid or not p1:
1282 raise error.Abort(_('not a Mercurial patch'))
1282 raise error.Abort(_('not a Mercurial patch'))
1283 p1 = repo[p1]
1283 p1 = repo[p1]
1284 p2 = repo[p2 or nullid]
1284 p2 = repo[p2 or nullid]
1285 elif p2:
1285 elif p2:
1286 try:
1286 try:
1287 p1 = repo[p1]
1287 p1 = repo[p1]
1288 p2 = repo[p2]
1288 p2 = repo[p2]
1289 # Without any options, consider p2 only if the
1289 # Without any options, consider p2 only if the
1290 # patch is being applied on top of the recorded
1290 # patch is being applied on top of the recorded
1291 # first parent.
1291 # first parent.
1292 if p1 != parents[0]:
1292 if p1 != parents[0]:
1293 p1 = parents[0]
1293 p1 = parents[0]
1294 p2 = repo[nullid]
1294 p2 = repo[nullid]
1295 except error.RepoError:
1295 except error.RepoError:
1296 p1, p2 = parents
1296 p1, p2 = parents
1297 if p2.node() == nullid:
1297 if p2.node() == nullid:
1298 ui.warn(_("warning: import the patch as a normal revision\n"
1298 ui.warn(_("warning: import the patch as a normal revision\n"
1299 "(use --exact to import the patch as a merge)\n"))
1299 "(use --exact to import the patch as a merge)\n"))
1300 else:
1300 else:
1301 p1, p2 = parents
1301 p1, p2 = parents
1302
1302
1303 n = None
1303 n = None
1304 if update:
1304 if update:
1305 if p1 != parents[0]:
1305 if p1 != parents[0]:
1306 updatefunc(repo, p1.node())
1306 updatefunc(repo, p1.node())
1307 if p2 != parents[1]:
1307 if p2 != parents[1]:
1308 repo.setparents(p1.node(), p2.node())
1308 repo.setparents(p1.node(), p2.node())
1309
1309
1310 if opts.get('exact') or importbranch:
1310 if opts.get('exact') or importbranch:
1311 repo.dirstate.setbranch(branch or 'default')
1311 repo.dirstate.setbranch(branch or 'default')
1312
1312
1313 partial = opts.get('partial', False)
1313 partial = opts.get('partial', False)
1314 files = set()
1314 files = set()
1315 try:
1315 try:
1316 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1316 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1317 files=files, eolmode=None, similarity=sim / 100.0)
1317 files=files, eolmode=None, similarity=sim / 100.0)
1318 except error.PatchError as e:
1318 except error.PatchError as e:
1319 if not partial:
1319 if not partial:
1320 raise error.Abort(str(e))
1320 raise error.Abort(str(e))
1321 if partial:
1321 if partial:
1322 rejects = True
1322 rejects = True
1323
1323
1324 files = list(files)
1324 files = list(files)
1325 if nocommit:
1325 if nocommit:
1326 if message:
1326 if message:
1327 msgs.append(message)
1327 msgs.append(message)
1328 else:
1328 else:
1329 if opts.get('exact') or p2:
1329 if opts.get('exact') or p2:
1330 # If you got here, you either use --force and know what
1330 # If you got here, you either use --force and know what
1331 # you are doing or used --exact or a merge patch while
1331 # you are doing or used --exact or a merge patch while
1332 # being updated to its first parent.
1332 # being updated to its first parent.
1333 m = None
1333 m = None
1334 else:
1334 else:
1335 m = scmutil.matchfiles(repo, files or [])
1335 m = scmutil.matchfiles(repo, files or [])
1336 editform = mergeeditform(repo[None], 'import.normal')
1336 editform = mergeeditform(repo[None], 'import.normal')
1337 if opts.get('exact'):
1337 if opts.get('exact'):
1338 editor = None
1338 editor = None
1339 else:
1339 else:
1340 editor = getcommiteditor(editform=editform, **opts)
1340 editor = getcommiteditor(editform=editform, **opts)
1341 extra = {}
1341 extra = {}
1342 for idfunc in extrapreimport:
1342 for idfunc in extrapreimport:
1343 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1343 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1344 overrides = {}
1344 overrides = {}
1345 if partial:
1345 if partial:
1346 overrides[('ui', 'allowemptycommit')] = True
1346 overrides[('ui', 'allowemptycommit')] = True
1347 with repo.ui.configoverride(overrides, 'import'):
1347 with repo.ui.configoverride(overrides, 'import'):
1348 n = repo.commit(message, user,
1348 n = repo.commit(message, user,
1349 date, match=m,
1349 date, match=m,
1350 editor=editor, extra=extra)
1350 editor=editor, extra=extra)
1351 for idfunc in extrapostimport:
1351 for idfunc in extrapostimport:
1352 extrapostimportmap[idfunc](repo[n])
1352 extrapostimportmap[idfunc](repo[n])
1353 else:
1353 else:
1354 if opts.get('exact') or importbranch:
1354 if opts.get('exact') or importbranch:
1355 branch = branch or 'default'
1355 branch = branch or 'default'
1356 else:
1356 else:
1357 branch = p1.branch()
1357 branch = p1.branch()
1358 store = patch.filestore()
1358 store = patch.filestore()
1359 try:
1359 try:
1360 files = set()
1360 files = set()
1361 try:
1361 try:
1362 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1362 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1363 files, eolmode=None)
1363 files, eolmode=None)
1364 except error.PatchError as e:
1364 except error.PatchError as e:
1365 raise error.Abort(str(e))
1365 raise error.Abort(str(e))
1366 if opts.get('exact'):
1366 if opts.get('exact'):
1367 editor = None
1367 editor = None
1368 else:
1368 else:
1369 editor = getcommiteditor(editform='import.bypass')
1369 editor = getcommiteditor(editform='import.bypass')
1370 memctx = context.memctx(repo, (p1.node(), p2.node()),
1370 memctx = context.memctx(repo, (p1.node(), p2.node()),
1371 message,
1371 message,
1372 files=files,
1372 files=files,
1373 filectxfn=store,
1373 filectxfn=store,
1374 user=user,
1374 user=user,
1375 date=date,
1375 date=date,
1376 branch=branch,
1376 branch=branch,
1377 editor=editor)
1377 editor=editor)
1378 n = memctx.commit()
1378 n = memctx.commit()
1379 finally:
1379 finally:
1380 store.close()
1380 store.close()
1381 if opts.get('exact') and nocommit:
1381 if opts.get('exact') and nocommit:
1382 # --exact with --no-commit is still useful in that it does merge
1382 # --exact with --no-commit is still useful in that it does merge
1383 # and branch bits
1383 # and branch bits
1384 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1384 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1385 elif opts.get('exact') and hex(n) != nodeid:
1385 elif opts.get('exact') and hex(n) != nodeid:
1386 raise error.Abort(_('patch is damaged or loses information'))
1386 raise error.Abort(_('patch is damaged or loses information'))
1387 msg = _('applied to working directory')
1387 msg = _('applied to working directory')
1388 if n:
1388 if n:
1389 # i18n: refers to a short changeset id
1389 # i18n: refers to a short changeset id
1390 msg = _('created %s') % short(n)
1390 msg = _('created %s') % short(n)
1391 return (msg, n, rejects)
1391 return (msg, n, rejects)
1392 finally:
1392 finally:
1393 os.unlink(tmpname)
1393 os.unlink(tmpname)
1394
1394
1395 # facility to let extensions include additional data in an exported patch
1395 # facility to let extensions include additional data in an exported patch
1396 # list of identifiers to be executed in order
1396 # list of identifiers to be executed in order
1397 extraexport = []
1397 extraexport = []
1398 # mapping from identifier to actual export function
1398 # mapping from identifier to actual export function
1399 # function as to return a string to be added to the header or None
1399 # function as to return a string to be added to the header or None
1400 # it is given two arguments (sequencenumber, changectx)
1400 # it is given two arguments (sequencenumber, changectx)
1401 extraexportmap = {}
1401 extraexportmap = {}
1402
1402
1403 def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
1403 def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
1404 node = scmutil.binnode(ctx)
1404 node = scmutil.binnode(ctx)
1405 parents = [p.node() for p in ctx.parents() if p]
1405 parents = [p.node() for p in ctx.parents() if p]
1406 branch = ctx.branch()
1406 branch = ctx.branch()
1407 if switch_parent:
1407 if switch_parent:
1408 parents.reverse()
1408 parents.reverse()
1409
1409
1410 if parents:
1410 if parents:
1411 prev = parents[0]
1411 prev = parents[0]
1412 else:
1412 else:
1413 prev = nullid
1413 prev = nullid
1414
1414
1415 write("# HG changeset patch\n")
1415 write("# HG changeset patch\n")
1416 write("# User %s\n" % ctx.user())
1416 write("# User %s\n" % ctx.user())
1417 write("# Date %d %d\n" % ctx.date())
1417 write("# Date %d %d\n" % ctx.date())
1418 write("# %s\n" % util.datestr(ctx.date()))
1418 write("# %s\n" % util.datestr(ctx.date()))
1419 if branch and branch != 'default':
1419 if branch and branch != 'default':
1420 write("# Branch %s\n" % branch)
1420 write("# Branch %s\n" % branch)
1421 write("# Node ID %s\n" % hex(node))
1421 write("# Node ID %s\n" % hex(node))
1422 write("# Parent %s\n" % hex(prev))
1422 write("# Parent %s\n" % hex(prev))
1423 if len(parents) > 1:
1423 if len(parents) > 1:
1424 write("# Parent %s\n" % hex(parents[1]))
1424 write("# Parent %s\n" % hex(parents[1]))
1425
1425
1426 for headerid in extraexport:
1426 for headerid in extraexport:
1427 header = extraexportmap[headerid](seqno, ctx)
1427 header = extraexportmap[headerid](seqno, ctx)
1428 if header is not None:
1428 if header is not None:
1429 write('# %s\n' % header)
1429 write('# %s\n' % header)
1430 write(ctx.description().rstrip())
1430 write(ctx.description().rstrip())
1431 write("\n\n")
1431 write("\n\n")
1432
1432
1433 for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
1433 for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
1434 write(chunk, label=label)
1434 write(chunk, label=label)
1435
1435
1436 def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
1436 def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
1437 opts=None, match=None):
1437 opts=None, match=None):
1438 '''export changesets as hg patches
1438 '''export changesets as hg patches
1439
1439
1440 Args:
1440 Args:
1441 repo: The repository from which we're exporting revisions.
1441 repo: The repository from which we're exporting revisions.
1442 revs: A list of revisions to export as revision numbers.
1442 revs: A list of revisions to export as revision numbers.
1443 fntemplate: An optional string to use for generating patch file names.
1443 fntemplate: An optional string to use for generating patch file names.
1444 fp: An optional file-like object to which patches should be written.
1444 fp: An optional file-like object to which patches should be written.
1445 switch_parent: If True, show diffs against second parent when not nullid.
1445 switch_parent: If True, show diffs against second parent when not nullid.
1446 Default is false, which always shows diff against p1.
1446 Default is false, which always shows diff against p1.
1447 opts: diff options to use for generating the patch.
1447 opts: diff options to use for generating the patch.
1448 match: If specified, only export changes to files matching this matcher.
1448 match: If specified, only export changes to files matching this matcher.
1449
1449
1450 Returns:
1450 Returns:
1451 Nothing.
1451 Nothing.
1452
1452
1453 Side Effect:
1453 Side Effect:
1454 "HG Changeset Patch" data is emitted to one of the following
1454 "HG Changeset Patch" data is emitted to one of the following
1455 destinations:
1455 destinations:
1456 fp is specified: All revs are written to the specified
1456 fp is specified: All revs are written to the specified
1457 file-like object.
1457 file-like object.
1458 fntemplate specified: Each rev is written to a unique file named using
1458 fntemplate specified: Each rev is written to a unique file named using
1459 the given template.
1459 the given template.
1460 Neither fp nor template specified: All revs written to repo.ui.write()
1460 Neither fp nor template specified: All revs written to repo.ui.write()
1461 '''
1461 '''
1462
1462
1463 total = len(revs)
1463 total = len(revs)
1464 revwidth = max(len(str(rev)) for rev in revs)
1464 revwidth = max(len(str(rev)) for rev in revs)
1465 filemode = {}
1465 filemode = {}
1466
1466
1467 write = None
1467 write = None
1468 dest = '<unnamed>'
1468 dest = '<unnamed>'
1469 if fp:
1469 if fp:
1470 dest = getattr(fp, 'name', dest)
1470 dest = getattr(fp, 'name', dest)
1471 def write(s, **kw):
1471 def write(s, **kw):
1472 fp.write(s)
1472 fp.write(s)
1473 elif not fntemplate:
1473 elif not fntemplate:
1474 write = repo.ui.write
1474 write = repo.ui.write
1475
1475
1476 for seqno, rev in enumerate(revs, 1):
1476 for seqno, rev in enumerate(revs, 1):
1477 ctx = repo[rev]
1477 ctx = repo[rev]
1478 fo = None
1478 fo = None
1479 if not fp and fntemplate:
1479 if not fp and fntemplate:
1480 desc_lines = ctx.description().rstrip().split('\n')
1480 desc_lines = ctx.description().rstrip().split('\n')
1481 desc = desc_lines[0] #Commit always has a first line.
1481 desc = desc_lines[0] #Commit always has a first line.
1482 fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
1482 fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
1483 total=total, seqno=seqno, revwidth=revwidth,
1483 total=total, seqno=seqno, revwidth=revwidth,
1484 mode='wb', modemap=filemode)
1484 mode='wb', modemap=filemode)
1485 dest = fo.name
1485 dest = fo.name
1486 def write(s, **kw):
1486 def write(s, **kw):
1487 fo.write(s)
1487 fo.write(s)
1488 if not dest.startswith('<'):
1488 if not dest.startswith('<'):
1489 repo.ui.note("%s\n" % dest)
1489 repo.ui.note("%s\n" % dest)
1490 _exportsingle(
1490 _exportsingle(
1491 repo, ctx, match, switch_parent, rev, seqno, write, opts)
1491 repo, ctx, match, switch_parent, rev, seqno, write, opts)
1492 if fo is not None:
1492 if fo is not None:
1493 fo.close()
1493 fo.close()
1494
1494
1495 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1495 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1496 changes=None, stat=False, fp=None, prefix='',
1496 changes=None, stat=False, fp=None, prefix='',
1497 root='', listsubrepos=False):
1497 root='', listsubrepos=False, hunksfilterfn=None):
1498 '''show diff or diffstat.'''
1498 '''show diff or diffstat.'''
1499 if fp is None:
1499 if fp is None:
1500 write = ui.write
1500 write = ui.write
1501 else:
1501 else:
1502 def write(s, **kw):
1502 def write(s, **kw):
1503 fp.write(s)
1503 fp.write(s)
1504
1504
1505 if root:
1505 if root:
1506 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1506 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1507 else:
1507 else:
1508 relroot = ''
1508 relroot = ''
1509 if relroot != '':
1509 if relroot != '':
1510 # XXX relative roots currently don't work if the root is within a
1510 # XXX relative roots currently don't work if the root is within a
1511 # subrepo
1511 # subrepo
1512 uirelroot = match.uipath(relroot)
1512 uirelroot = match.uipath(relroot)
1513 relroot += '/'
1513 relroot += '/'
1514 for matchroot in match.files():
1514 for matchroot in match.files():
1515 if not matchroot.startswith(relroot):
1515 if not matchroot.startswith(relroot):
1516 ui.warn(_('warning: %s not inside relative root %s\n') % (
1516 ui.warn(_('warning: %s not inside relative root %s\n') % (
1517 match.uipath(matchroot), uirelroot))
1517 match.uipath(matchroot), uirelroot))
1518
1518
1519 if stat:
1519 if stat:
1520 diffopts = diffopts.copy(context=0)
1520 diffopts = diffopts.copy(context=0)
1521 width = 80
1521 width = 80
1522 if not ui.plain():
1522 if not ui.plain():
1523 width = ui.termwidth()
1523 width = ui.termwidth()
1524 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1524 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1525 prefix=prefix, relroot=relroot)
1525 prefix=prefix, relroot=relroot,
1526 hunksfilterfn=hunksfilterfn)
1526 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1527 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1527 width=width):
1528 width=width):
1528 write(chunk, label=label)
1529 write(chunk, label=label)
1529 else:
1530 else:
1530 for chunk, label in patch.diffui(repo, node1, node2, match,
1531 for chunk, label in patch.diffui(repo, node1, node2, match,
1531 changes, diffopts, prefix=prefix,
1532 changes, diffopts, prefix=prefix,
1532 relroot=relroot):
1533 relroot=relroot,
1534 hunksfilterfn=hunksfilterfn):
1533 write(chunk, label=label)
1535 write(chunk, label=label)
1534
1536
1535 if listsubrepos:
1537 if listsubrepos:
1536 ctx1 = repo[node1]
1538 ctx1 = repo[node1]
1537 ctx2 = repo[node2]
1539 ctx2 = repo[node2]
1538 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1540 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1539 tempnode2 = node2
1541 tempnode2 = node2
1540 try:
1542 try:
1541 if node2 is not None:
1543 if node2 is not None:
1542 tempnode2 = ctx2.substate[subpath][1]
1544 tempnode2 = ctx2.substate[subpath][1]
1543 except KeyError:
1545 except KeyError:
1544 # A subrepo that existed in node1 was deleted between node1 and
1546 # A subrepo that existed in node1 was deleted between node1 and
1545 # node2 (inclusive). Thus, ctx2's substate won't contain that
1547 # node2 (inclusive). Thus, ctx2's substate won't contain that
1546 # subpath. The best we can do is to ignore it.
1548 # subpath. The best we can do is to ignore it.
1547 tempnode2 = None
1549 tempnode2 = None
1548 submatch = matchmod.subdirmatcher(subpath, match)
1550 submatch = matchmod.subdirmatcher(subpath, match)
1549 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1551 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1550 stat=stat, fp=fp, prefix=prefix)
1552 stat=stat, fp=fp, prefix=prefix)
1551
1553
1552 def _changesetlabels(ctx):
1554 def _changesetlabels(ctx):
1553 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1555 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1554 if ctx.obsolete():
1556 if ctx.obsolete():
1555 labels.append('changeset.obsolete')
1557 labels.append('changeset.obsolete')
1556 if ctx.isunstable():
1558 if ctx.isunstable():
1557 labels.append('changeset.unstable')
1559 labels.append('changeset.unstable')
1558 for instability in ctx.instabilities():
1560 for instability in ctx.instabilities():
1559 labels.append('instability.%s' % instability)
1561 labels.append('instability.%s' % instability)
1560 return ' '.join(labels)
1562 return ' '.join(labels)
1561
1563
1562 class changeset_printer(object):
1564 class changeset_printer(object):
1563 '''show changeset information when templating not requested.'''
1565 '''show changeset information when templating not requested.'''
1564
1566
1565 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1567 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1566 self.ui = ui
1568 self.ui = ui
1567 self.repo = repo
1569 self.repo = repo
1568 self.buffered = buffered
1570 self.buffered = buffered
1569 self.matchfn = matchfn
1571 self.matchfn = matchfn
1570 self.diffopts = diffopts
1572 self.diffopts = diffopts
1571 self.header = {}
1573 self.header = {}
1572 self.hunk = {}
1574 self.hunk = {}
1573 self.lastheader = None
1575 self.lastheader = None
1574 self.footer = None
1576 self.footer = None
1575
1577
1576 def flush(self, ctx):
1578 def flush(self, ctx):
1577 rev = ctx.rev()
1579 rev = ctx.rev()
1578 if rev in self.header:
1580 if rev in self.header:
1579 h = self.header[rev]
1581 h = self.header[rev]
1580 if h != self.lastheader:
1582 if h != self.lastheader:
1581 self.lastheader = h
1583 self.lastheader = h
1582 self.ui.write(h)
1584 self.ui.write(h)
1583 del self.header[rev]
1585 del self.header[rev]
1584 if rev in self.hunk:
1586 if rev in self.hunk:
1585 self.ui.write(self.hunk[rev])
1587 self.ui.write(self.hunk[rev])
1586 del self.hunk[rev]
1588 del self.hunk[rev]
1587 return 1
1589 return 1
1588 return 0
1590 return 0
1589
1591
1590 def close(self):
1592 def close(self):
1591 if self.footer:
1593 if self.footer:
1592 self.ui.write(self.footer)
1594 self.ui.write(self.footer)
1593
1595
1594 def show(self, ctx, copies=None, matchfn=None, **props):
1596 def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
1597 **props):
1595 props = pycompat.byteskwargs(props)
1598 props = pycompat.byteskwargs(props)
1596 if self.buffered:
1599 if self.buffered:
1597 self.ui.pushbuffer(labeled=True)
1600 self.ui.pushbuffer(labeled=True)
1598 self._show(ctx, copies, matchfn, props)
1601 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1599 self.hunk[ctx.rev()] = self.ui.popbuffer()
1602 self.hunk[ctx.rev()] = self.ui.popbuffer()
1600 else:
1603 else:
1601 self._show(ctx, copies, matchfn, props)
1604 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1602
1605
1603 def _show(self, ctx, copies, matchfn, props):
1606 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1604 '''show a single changeset or file revision'''
1607 '''show a single changeset or file revision'''
1605 changenode = ctx.node()
1608 changenode = ctx.node()
1606 rev = ctx.rev()
1609 rev = ctx.rev()
1607
1610
1608 if self.ui.quiet:
1611 if self.ui.quiet:
1609 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
1612 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
1610 label='log.node')
1613 label='log.node')
1611 return
1614 return
1612
1615
1613 date = util.datestr(ctx.date())
1616 date = util.datestr(ctx.date())
1614
1617
1615 # i18n: column positioning for "hg log"
1618 # i18n: column positioning for "hg log"
1616 self.ui.write(_("changeset: %s\n") % scmutil.formatchangeid(ctx),
1619 self.ui.write(_("changeset: %s\n") % scmutil.formatchangeid(ctx),
1617 label=_changesetlabels(ctx))
1620 label=_changesetlabels(ctx))
1618
1621
1619 # branches are shown first before any other names due to backwards
1622 # branches are shown first before any other names due to backwards
1620 # compatibility
1623 # compatibility
1621 branch = ctx.branch()
1624 branch = ctx.branch()
1622 # don't show the default branch name
1625 # don't show the default branch name
1623 if branch != 'default':
1626 if branch != 'default':
1624 # i18n: column positioning for "hg log"
1627 # i18n: column positioning for "hg log"
1625 self.ui.write(_("branch: %s\n") % branch,
1628 self.ui.write(_("branch: %s\n") % branch,
1626 label='log.branch')
1629 label='log.branch')
1627
1630
1628 for nsname, ns in self.repo.names.iteritems():
1631 for nsname, ns in self.repo.names.iteritems():
1629 # branches has special logic already handled above, so here we just
1632 # branches has special logic already handled above, so here we just
1630 # skip it
1633 # skip it
1631 if nsname == 'branches':
1634 if nsname == 'branches':
1632 continue
1635 continue
1633 # we will use the templatename as the color name since those two
1636 # we will use the templatename as the color name since those two
1634 # should be the same
1637 # should be the same
1635 for name in ns.names(self.repo, changenode):
1638 for name in ns.names(self.repo, changenode):
1636 self.ui.write(ns.logfmt % name,
1639 self.ui.write(ns.logfmt % name,
1637 label='log.%s' % ns.colorname)
1640 label='log.%s' % ns.colorname)
1638 if self.ui.debugflag:
1641 if self.ui.debugflag:
1639 # i18n: column positioning for "hg log"
1642 # i18n: column positioning for "hg log"
1640 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1643 self.ui.write(_("phase: %s\n") % ctx.phasestr(),
1641 label='log.phase')
1644 label='log.phase')
1642 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1645 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1643 label = 'log.parent changeset.%s' % pctx.phasestr()
1646 label = 'log.parent changeset.%s' % pctx.phasestr()
1644 # i18n: column positioning for "hg log"
1647 # i18n: column positioning for "hg log"
1645 self.ui.write(_("parent: %s\n") % scmutil.formatchangeid(pctx),
1648 self.ui.write(_("parent: %s\n") % scmutil.formatchangeid(pctx),
1646 label=label)
1649 label=label)
1647
1650
1648 if self.ui.debugflag and rev is not None:
1651 if self.ui.debugflag and rev is not None:
1649 mnode = ctx.manifestnode()
1652 mnode = ctx.manifestnode()
1650 mrev = self.repo.manifestlog._revlog.rev(mnode)
1653 mrev = self.repo.manifestlog._revlog.rev(mnode)
1651 # i18n: column positioning for "hg log"
1654 # i18n: column positioning for "hg log"
1652 self.ui.write(_("manifest: %s\n")
1655 self.ui.write(_("manifest: %s\n")
1653 % scmutil.formatrevnode(self.ui, mrev, mnode),
1656 % scmutil.formatrevnode(self.ui, mrev, mnode),
1654 label='ui.debug log.manifest')
1657 label='ui.debug log.manifest')
1655 # i18n: column positioning for "hg log"
1658 # i18n: column positioning for "hg log"
1656 self.ui.write(_("user: %s\n") % ctx.user(),
1659 self.ui.write(_("user: %s\n") % ctx.user(),
1657 label='log.user')
1660 label='log.user')
1658 # i18n: column positioning for "hg log"
1661 # i18n: column positioning for "hg log"
1659 self.ui.write(_("date: %s\n") % date,
1662 self.ui.write(_("date: %s\n") % date,
1660 label='log.date')
1663 label='log.date')
1661
1664
1662 if ctx.isunstable():
1665 if ctx.isunstable():
1663 # i18n: column positioning for "hg log"
1666 # i18n: column positioning for "hg log"
1664 instabilities = ctx.instabilities()
1667 instabilities = ctx.instabilities()
1665 self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
1668 self.ui.write(_("instability: %s\n") % ', '.join(instabilities),
1666 label='log.instability')
1669 label='log.instability')
1667
1670
1668 elif ctx.obsolete():
1671 elif ctx.obsolete():
1669 self._showobsfate(ctx)
1672 self._showobsfate(ctx)
1670
1673
1671 self._exthook(ctx)
1674 self._exthook(ctx)
1672
1675
1673 if self.ui.debugflag:
1676 if self.ui.debugflag:
1674 files = ctx.p1().status(ctx)[:3]
1677 files = ctx.p1().status(ctx)[:3]
1675 for key, value in zip([# i18n: column positioning for "hg log"
1678 for key, value in zip([# i18n: column positioning for "hg log"
1676 _("files:"),
1679 _("files:"),
1677 # i18n: column positioning for "hg log"
1680 # i18n: column positioning for "hg log"
1678 _("files+:"),
1681 _("files+:"),
1679 # i18n: column positioning for "hg log"
1682 # i18n: column positioning for "hg log"
1680 _("files-:")], files):
1683 _("files-:")], files):
1681 if value:
1684 if value:
1682 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1685 self.ui.write("%-12s %s\n" % (key, " ".join(value)),
1683 label='ui.debug log.files')
1686 label='ui.debug log.files')
1684 elif ctx.files() and self.ui.verbose:
1687 elif ctx.files() and self.ui.verbose:
1685 # i18n: column positioning for "hg log"
1688 # i18n: column positioning for "hg log"
1686 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1689 self.ui.write(_("files: %s\n") % " ".join(ctx.files()),
1687 label='ui.note log.files')
1690 label='ui.note log.files')
1688 if copies and self.ui.verbose:
1691 if copies and self.ui.verbose:
1689 copies = ['%s (%s)' % c for c in copies]
1692 copies = ['%s (%s)' % c for c in copies]
1690 # i18n: column positioning for "hg log"
1693 # i18n: column positioning for "hg log"
1691 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1694 self.ui.write(_("copies: %s\n") % ' '.join(copies),
1692 label='ui.note log.copies')
1695 label='ui.note log.copies')
1693
1696
1694 extra = ctx.extra()
1697 extra = ctx.extra()
1695 if extra and self.ui.debugflag:
1698 if extra and self.ui.debugflag:
1696 for key, value in sorted(extra.items()):
1699 for key, value in sorted(extra.items()):
1697 # i18n: column positioning for "hg log"
1700 # i18n: column positioning for "hg log"
1698 self.ui.write(_("extra: %s=%s\n")
1701 self.ui.write(_("extra: %s=%s\n")
1699 % (key, util.escapestr(value)),
1702 % (key, util.escapestr(value)),
1700 label='ui.debug log.extra')
1703 label='ui.debug log.extra')
1701
1704
1702 description = ctx.description().strip()
1705 description = ctx.description().strip()
1703 if description:
1706 if description:
1704 if self.ui.verbose:
1707 if self.ui.verbose:
1705 self.ui.write(_("description:\n"),
1708 self.ui.write(_("description:\n"),
1706 label='ui.note log.description')
1709 label='ui.note log.description')
1707 self.ui.write(description,
1710 self.ui.write(description,
1708 label='ui.note log.description')
1711 label='ui.note log.description')
1709 self.ui.write("\n\n")
1712 self.ui.write("\n\n")
1710 else:
1713 else:
1711 # i18n: column positioning for "hg log"
1714 # i18n: column positioning for "hg log"
1712 self.ui.write(_("summary: %s\n") %
1715 self.ui.write(_("summary: %s\n") %
1713 description.splitlines()[0],
1716 description.splitlines()[0],
1714 label='log.summary')
1717 label='log.summary')
1715 self.ui.write("\n")
1718 self.ui.write("\n")
1716
1719
1717 self.showpatch(ctx, matchfn)
1720 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1718
1721
1719 def _showobsfate(self, ctx):
1722 def _showobsfate(self, ctx):
1720 obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
1723 obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
1721
1724
1722 if obsfate:
1725 if obsfate:
1723 for obsfateline in obsfate:
1726 for obsfateline in obsfate:
1724 # i18n: column positioning for "hg log"
1727 # i18n: column positioning for "hg log"
1725 self.ui.write(_("obsfate: %s\n") % obsfateline,
1728 self.ui.write(_("obsfate: %s\n") % obsfateline,
1726 label='log.obsfate')
1729 label='log.obsfate')
1727
1730
1728 def _exthook(self, ctx):
1731 def _exthook(self, ctx):
1729 '''empty method used by extension as a hook point
1732 '''empty method used by extension as a hook point
1730 '''
1733 '''
1731
1734
1732 def showpatch(self, ctx, matchfn):
1735 def showpatch(self, ctx, matchfn, hunksfilterfn=None):
1733 if not matchfn:
1736 if not matchfn:
1734 matchfn = self.matchfn
1737 matchfn = self.matchfn
1735 if matchfn:
1738 if matchfn:
1736 stat = self.diffopts.get('stat')
1739 stat = self.diffopts.get('stat')
1737 diff = self.diffopts.get('patch')
1740 diff = self.diffopts.get('patch')
1738 diffopts = patch.diffallopts(self.ui, self.diffopts)
1741 diffopts = patch.diffallopts(self.ui, self.diffopts)
1739 node = ctx.node()
1742 node = ctx.node()
1740 prev = ctx.p1().node()
1743 prev = ctx.p1().node()
1741 if stat:
1744 if stat:
1742 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1745 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1743 match=matchfn, stat=True)
1746 match=matchfn, stat=True,
1747 hunksfilterfn=hunksfilterfn)
1744 if diff:
1748 if diff:
1745 if stat:
1749 if stat:
1746 self.ui.write("\n")
1750 self.ui.write("\n")
1747 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1751 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1748 match=matchfn, stat=False)
1752 match=matchfn, stat=False,
1753 hunksfilterfn=hunksfilterfn)
1749 self.ui.write("\n")
1754 self.ui.write("\n")
1750
1755
1751 class jsonchangeset(changeset_printer):
1756 class jsonchangeset(changeset_printer):
1752 '''format changeset information.'''
1757 '''format changeset information.'''
1753
1758
1754 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1759 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1755 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1760 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1756 self.cache = {}
1761 self.cache = {}
1757 self._first = True
1762 self._first = True
1758
1763
1759 def close(self):
1764 def close(self):
1760 if not self._first:
1765 if not self._first:
1761 self.ui.write("\n]\n")
1766 self.ui.write("\n]\n")
1762 else:
1767 else:
1763 self.ui.write("[]\n")
1768 self.ui.write("[]\n")
1764
1769
1765 def _show(self, ctx, copies, matchfn, props):
1770 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1766 '''show a single changeset or file revision'''
1771 '''show a single changeset or file revision'''
1767 rev = ctx.rev()
1772 rev = ctx.rev()
1768 if rev is None:
1773 if rev is None:
1769 jrev = jnode = 'null'
1774 jrev = jnode = 'null'
1770 else:
1775 else:
1771 jrev = '%d' % rev
1776 jrev = '%d' % rev
1772 jnode = '"%s"' % hex(ctx.node())
1777 jnode = '"%s"' % hex(ctx.node())
1773 j = encoding.jsonescape
1778 j = encoding.jsonescape
1774
1779
1775 if self._first:
1780 if self._first:
1776 self.ui.write("[\n {")
1781 self.ui.write("[\n {")
1777 self._first = False
1782 self._first = False
1778 else:
1783 else:
1779 self.ui.write(",\n {")
1784 self.ui.write(",\n {")
1780
1785
1781 if self.ui.quiet:
1786 if self.ui.quiet:
1782 self.ui.write(('\n "rev": %s') % jrev)
1787 self.ui.write(('\n "rev": %s') % jrev)
1783 self.ui.write((',\n "node": %s') % jnode)
1788 self.ui.write((',\n "node": %s') % jnode)
1784 self.ui.write('\n }')
1789 self.ui.write('\n }')
1785 return
1790 return
1786
1791
1787 self.ui.write(('\n "rev": %s') % jrev)
1792 self.ui.write(('\n "rev": %s') % jrev)
1788 self.ui.write((',\n "node": %s') % jnode)
1793 self.ui.write((',\n "node": %s') % jnode)
1789 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1794 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1790 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1795 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1791 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1796 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1792 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1797 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1793 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1798 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1794
1799
1795 self.ui.write((',\n "bookmarks": [%s]') %
1800 self.ui.write((',\n "bookmarks": [%s]') %
1796 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1801 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1797 self.ui.write((',\n "tags": [%s]') %
1802 self.ui.write((',\n "tags": [%s]') %
1798 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1803 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1799 self.ui.write((',\n "parents": [%s]') %
1804 self.ui.write((',\n "parents": [%s]') %
1800 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1805 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1801
1806
1802 if self.ui.debugflag:
1807 if self.ui.debugflag:
1803 if rev is None:
1808 if rev is None:
1804 jmanifestnode = 'null'
1809 jmanifestnode = 'null'
1805 else:
1810 else:
1806 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1811 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1807 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1812 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1808
1813
1809 self.ui.write((',\n "extra": {%s}') %
1814 self.ui.write((',\n "extra": {%s}') %
1810 ", ".join('"%s": "%s"' % (j(k), j(v))
1815 ", ".join('"%s": "%s"' % (j(k), j(v))
1811 for k, v in ctx.extra().items()))
1816 for k, v in ctx.extra().items()))
1812
1817
1813 files = ctx.p1().status(ctx)
1818 files = ctx.p1().status(ctx)
1814 self.ui.write((',\n "modified": [%s]') %
1819 self.ui.write((',\n "modified": [%s]') %
1815 ", ".join('"%s"' % j(f) for f in files[0]))
1820 ", ".join('"%s"' % j(f) for f in files[0]))
1816 self.ui.write((',\n "added": [%s]') %
1821 self.ui.write((',\n "added": [%s]') %
1817 ", ".join('"%s"' % j(f) for f in files[1]))
1822 ", ".join('"%s"' % j(f) for f in files[1]))
1818 self.ui.write((',\n "removed": [%s]') %
1823 self.ui.write((',\n "removed": [%s]') %
1819 ", ".join('"%s"' % j(f) for f in files[2]))
1824 ", ".join('"%s"' % j(f) for f in files[2]))
1820
1825
1821 elif self.ui.verbose:
1826 elif self.ui.verbose:
1822 self.ui.write((',\n "files": [%s]') %
1827 self.ui.write((',\n "files": [%s]') %
1823 ", ".join('"%s"' % j(f) for f in ctx.files()))
1828 ", ".join('"%s"' % j(f) for f in ctx.files()))
1824
1829
1825 if copies:
1830 if copies:
1826 self.ui.write((',\n "copies": {%s}') %
1831 self.ui.write((',\n "copies": {%s}') %
1827 ", ".join('"%s": "%s"' % (j(k), j(v))
1832 ", ".join('"%s": "%s"' % (j(k), j(v))
1828 for k, v in copies))
1833 for k, v in copies))
1829
1834
1830 matchfn = self.matchfn
1835 matchfn = self.matchfn
1831 if matchfn:
1836 if matchfn:
1832 stat = self.diffopts.get('stat')
1837 stat = self.diffopts.get('stat')
1833 diff = self.diffopts.get('patch')
1838 diff = self.diffopts.get('patch')
1834 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1839 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1835 node, prev = ctx.node(), ctx.p1().node()
1840 node, prev = ctx.node(), ctx.p1().node()
1836 if stat:
1841 if stat:
1837 self.ui.pushbuffer()
1842 self.ui.pushbuffer()
1838 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1843 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1839 match=matchfn, stat=True)
1844 match=matchfn, stat=True)
1840 self.ui.write((',\n "diffstat": "%s"')
1845 self.ui.write((',\n "diffstat": "%s"')
1841 % j(self.ui.popbuffer()))
1846 % j(self.ui.popbuffer()))
1842 if diff:
1847 if diff:
1843 self.ui.pushbuffer()
1848 self.ui.pushbuffer()
1844 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1849 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1845 match=matchfn, stat=False)
1850 match=matchfn, stat=False)
1846 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1851 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1847
1852
1848 self.ui.write("\n }")
1853 self.ui.write("\n }")
1849
1854
1850 class changeset_templater(changeset_printer):
1855 class changeset_templater(changeset_printer):
1851 '''format changeset information.'''
1856 '''format changeset information.'''
1852
1857
1853 # Arguments before "buffered" used to be positional. Consider not
1858 # Arguments before "buffered" used to be positional. Consider not
1854 # adding/removing arguments before "buffered" to not break callers.
1859 # adding/removing arguments before "buffered" to not break callers.
1855 def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
1860 def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
1856 buffered=False):
1861 buffered=False):
1857 diffopts = diffopts or {}
1862 diffopts = diffopts or {}
1858
1863
1859 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1864 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1860 self.t = formatter.loadtemplater(ui, tmplspec,
1865 self.t = formatter.loadtemplater(ui, tmplspec,
1861 cache=templatekw.defaulttempl)
1866 cache=templatekw.defaulttempl)
1862 self._counter = itertools.count()
1867 self._counter = itertools.count()
1863 self.cache = {}
1868 self.cache = {}
1864
1869
1865 self._tref = tmplspec.ref
1870 self._tref = tmplspec.ref
1866 self._parts = {'header': '', 'footer': '',
1871 self._parts = {'header': '', 'footer': '',
1867 tmplspec.ref: tmplspec.ref,
1872 tmplspec.ref: tmplspec.ref,
1868 'docheader': '', 'docfooter': '',
1873 'docheader': '', 'docfooter': '',
1869 'separator': ''}
1874 'separator': ''}
1870 if tmplspec.mapfile:
1875 if tmplspec.mapfile:
1871 # find correct templates for current mode, for backward
1876 # find correct templates for current mode, for backward
1872 # compatibility with 'log -v/-q/--debug' using a mapfile
1877 # compatibility with 'log -v/-q/--debug' using a mapfile
1873 tmplmodes = [
1878 tmplmodes = [
1874 (True, ''),
1879 (True, ''),
1875 (self.ui.verbose, '_verbose'),
1880 (self.ui.verbose, '_verbose'),
1876 (self.ui.quiet, '_quiet'),
1881 (self.ui.quiet, '_quiet'),
1877 (self.ui.debugflag, '_debug'),
1882 (self.ui.debugflag, '_debug'),
1878 ]
1883 ]
1879 for mode, postfix in tmplmodes:
1884 for mode, postfix in tmplmodes:
1880 for t in self._parts:
1885 for t in self._parts:
1881 cur = t + postfix
1886 cur = t + postfix
1882 if mode and cur in self.t:
1887 if mode and cur in self.t:
1883 self._parts[t] = cur
1888 self._parts[t] = cur
1884 else:
1889 else:
1885 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
1890 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
1886 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
1891 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
1887 self._parts.update(m)
1892 self._parts.update(m)
1888
1893
1889 if self._parts['docheader']:
1894 if self._parts['docheader']:
1890 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1895 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1891
1896
1892 def close(self):
1897 def close(self):
1893 if self._parts['docfooter']:
1898 if self._parts['docfooter']:
1894 if not self.footer:
1899 if not self.footer:
1895 self.footer = ""
1900 self.footer = ""
1896 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1901 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1897 return super(changeset_templater, self).close()
1902 return super(changeset_templater, self).close()
1898
1903
1899 def _show(self, ctx, copies, matchfn, props):
1904 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1900 '''show a single changeset or file revision'''
1905 '''show a single changeset or file revision'''
1901 props = props.copy()
1906 props = props.copy()
1902 props.update(templatekw.keywords)
1907 props.update(templatekw.keywords)
1903 props['templ'] = self.t
1908 props['templ'] = self.t
1904 props['ctx'] = ctx
1909 props['ctx'] = ctx
1905 props['repo'] = self.repo
1910 props['repo'] = self.repo
1906 props['ui'] = self.repo.ui
1911 props['ui'] = self.repo.ui
1907 props['index'] = index = next(self._counter)
1912 props['index'] = index = next(self._counter)
1908 props['revcache'] = {'copies': copies}
1913 props['revcache'] = {'copies': copies}
1909 props['cache'] = self.cache
1914 props['cache'] = self.cache
1910 props = pycompat.strkwargs(props)
1915 props = pycompat.strkwargs(props)
1911
1916
1912 # write separator, which wouldn't work well with the header part below
1917 # write separator, which wouldn't work well with the header part below
1913 # since there's inherently a conflict between header (across items) and
1918 # since there's inherently a conflict between header (across items) and
1914 # separator (per item)
1919 # separator (per item)
1915 if self._parts['separator'] and index > 0:
1920 if self._parts['separator'] and index > 0:
1916 self.ui.write(templater.stringify(self.t(self._parts['separator'])))
1921 self.ui.write(templater.stringify(self.t(self._parts['separator'])))
1917
1922
1918 # write header
1923 # write header
1919 if self._parts['header']:
1924 if self._parts['header']:
1920 h = templater.stringify(self.t(self._parts['header'], **props))
1925 h = templater.stringify(self.t(self._parts['header'], **props))
1921 if self.buffered:
1926 if self.buffered:
1922 self.header[ctx.rev()] = h
1927 self.header[ctx.rev()] = h
1923 else:
1928 else:
1924 if self.lastheader != h:
1929 if self.lastheader != h:
1925 self.lastheader = h
1930 self.lastheader = h
1926 self.ui.write(h)
1931 self.ui.write(h)
1927
1932
1928 # write changeset metadata, then patch if requested
1933 # write changeset metadata, then patch if requested
1929 key = self._parts[self._tref]
1934 key = self._parts[self._tref]
1930 self.ui.write(templater.stringify(self.t(key, **props)))
1935 self.ui.write(templater.stringify(self.t(key, **props)))
1931 self.showpatch(ctx, matchfn)
1936 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1932
1937
1933 if self._parts['footer']:
1938 if self._parts['footer']:
1934 if not self.footer:
1939 if not self.footer:
1935 self.footer = templater.stringify(
1940 self.footer = templater.stringify(
1936 self.t(self._parts['footer'], **props))
1941 self.t(self._parts['footer'], **props))
1937
1942
1938 def logtemplatespec(tmpl, mapfile):
1943 def logtemplatespec(tmpl, mapfile):
1939 if mapfile:
1944 if mapfile:
1940 return formatter.templatespec('changeset', tmpl, mapfile)
1945 return formatter.templatespec('changeset', tmpl, mapfile)
1941 else:
1946 else:
1942 return formatter.templatespec('', tmpl, None)
1947 return formatter.templatespec('', tmpl, None)
1943
1948
1944 def _lookuplogtemplate(ui, tmpl, style):
1949 def _lookuplogtemplate(ui, tmpl, style):
1945 """Find the template matching the given template spec or style
1950 """Find the template matching the given template spec or style
1946
1951
1947 See formatter.lookuptemplate() for details.
1952 See formatter.lookuptemplate() for details.
1948 """
1953 """
1949
1954
1950 # ui settings
1955 # ui settings
1951 if not tmpl and not style: # template are stronger than style
1956 if not tmpl and not style: # template are stronger than style
1952 tmpl = ui.config('ui', 'logtemplate')
1957 tmpl = ui.config('ui', 'logtemplate')
1953 if tmpl:
1958 if tmpl:
1954 return logtemplatespec(templater.unquotestring(tmpl), None)
1959 return logtemplatespec(templater.unquotestring(tmpl), None)
1955 else:
1960 else:
1956 style = util.expandpath(ui.config('ui', 'style'))
1961 style = util.expandpath(ui.config('ui', 'style'))
1957
1962
1958 if not tmpl and style:
1963 if not tmpl and style:
1959 mapfile = style
1964 mapfile = style
1960 if not os.path.split(mapfile)[0]:
1965 if not os.path.split(mapfile)[0]:
1961 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1966 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1962 or templater.templatepath(mapfile))
1967 or templater.templatepath(mapfile))
1963 if mapname:
1968 if mapname:
1964 mapfile = mapname
1969 mapfile = mapname
1965 return logtemplatespec(None, mapfile)
1970 return logtemplatespec(None, mapfile)
1966
1971
1967 if not tmpl:
1972 if not tmpl:
1968 return logtemplatespec(None, None)
1973 return logtemplatespec(None, None)
1969
1974
1970 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1975 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1971
1976
1972 def makelogtemplater(ui, repo, tmpl, buffered=False):
1977 def makelogtemplater(ui, repo, tmpl, buffered=False):
1973 """Create a changeset_templater from a literal template 'tmpl'"""
1978 """Create a changeset_templater from a literal template 'tmpl'"""
1974 spec = logtemplatespec(tmpl, None)
1979 spec = logtemplatespec(tmpl, None)
1975 return changeset_templater(ui, repo, spec, buffered=buffered)
1980 return changeset_templater(ui, repo, spec, buffered=buffered)
1976
1981
1977 def show_changeset(ui, repo, opts, buffered=False):
1982 def show_changeset(ui, repo, opts, buffered=False):
1978 """show one changeset using template or regular display.
1983 """show one changeset using template or regular display.
1979
1984
1980 Display format will be the first non-empty hit of:
1985 Display format will be the first non-empty hit of:
1981 1. option 'template'
1986 1. option 'template'
1982 2. option 'style'
1987 2. option 'style'
1983 3. [ui] setting 'logtemplate'
1988 3. [ui] setting 'logtemplate'
1984 4. [ui] setting 'style'
1989 4. [ui] setting 'style'
1985 If all of these values are either the unset or the empty string,
1990 If all of these values are either the unset or the empty string,
1986 regular display via changeset_printer() is done.
1991 regular display via changeset_printer() is done.
1987 """
1992 """
1988 # options
1993 # options
1989 match = None
1994 match = None
1990 if opts.get('patch') or opts.get('stat'):
1995 if opts.get('patch') or opts.get('stat'):
1991 match = scmutil.matchall(repo)
1996 match = scmutil.matchall(repo)
1992
1997
1993 if opts.get('template') == 'json':
1998 if opts.get('template') == 'json':
1994 return jsonchangeset(ui, repo, match, opts, buffered)
1999 return jsonchangeset(ui, repo, match, opts, buffered)
1995
2000
1996 spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
2001 spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
1997
2002
1998 if not spec.ref and not spec.tmpl and not spec.mapfile:
2003 if not spec.ref and not spec.tmpl and not spec.mapfile:
1999 return changeset_printer(ui, repo, match, opts, buffered)
2004 return changeset_printer(ui, repo, match, opts, buffered)
2000
2005
2001 return changeset_templater(ui, repo, spec, match, opts, buffered)
2006 return changeset_templater(ui, repo, spec, match, opts, buffered)
2002
2007
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    ``fm`` is a formatter, ``marker`` an obsolescence marker object, and
    ``index`` an optional position number written in front of the entry.
    """
    if index is not None:
        fm.write('index', '%i ', index)
    fm.write('prednode', '%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    # successors are only written when the list is non-empty
    fm.condwrite(succs, 'succnodes', '%s ',
                 fm.formatlist(map(hex, succs), name='node'))
    fm.write('flag', '%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write('parentnodes', '{%s} ',
                 fm.formatlist(map(hex, parents), name='node', sep=', '))
    fm.write('date', '(%s) ', fm.formatdate(marker.date()))
    # the date is rendered separately above, so drop it from the metadata
    # display; copy first to avoid mutating the marker's own metadata dict
    meta = marker.metadata().copy()
    meta.pop('date', None)
    fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
    fm.plain('\n')
2023
2028
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""
    datematch = util.matchdate(date)
    matchall = scmutil.matchall(repo)
    # revision number -> date tuple, filled in while walking the history
    matched = {}

    def record(ctx, fns):
        ctxdate = ctx.date()
        if datematch(ctxdate[0]):
            matched[ctx.rev()] = ctxdate

    # walkchangerevs() yields contexts after calling record() on the whole
    # window, so the first yielded rev found in 'matched' is the tipmost hit
    for ctx in walkchangerevs(repo, matchall, {'rev': None}, record):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return '%d' % rev

    raise error.Abort(_("revision matching date not found"))
2044
2049
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes for windowed history walks.

    Sizes start at ``windowsize`` and double on each step while below
    ``sizelimit``; once doubling stops, the last size is repeated forever
    (the generator never terminates).
    """
    size = windowsize
    while size < sizelimit:
        yield size
        size *= 2
    while True:
        yield size
2050
2055
class FileWalkError(Exception):
    """Raised when a file history cannot be walked using filelogs alone,
    signalling callers to fall back to scanning the changelog."""
    pass
2053
2058
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    ``revs`` bounds the walk (only linkrevs within min/max are kept when
    not following).  ``fncache`` is filled as a side effect: it maps each
    wanted rev to the list of matched filenames changed there.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            # 'copied' is only computed when following renames
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode-or-None) pairs; rename sources found
        # during the walk are appended to 'copies' and yielded afterwards
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        # 'last' is the newest filelog revision of interest
        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2150
2155
class _followfilter(object):
    """Stateful filter deciding whether revisions are connected by ancestry.

    The first revision passed to match() is remembered as the starting
    point ('startrev'); subsequent revisions match when they are linked to
    it through parent edges, tracked incrementally in the 'roots' set.
    """
    def __init__(self, repo, onlyfirst=False):
        # onlyfirst: consider only the first parent of each revision
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents of 'rev': just the first one when onlyfirst is set,
            # otherwise both with null parents filtered out
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        # first call establishes the reference revision and always matches
        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    # rev descends from a known root; it becomes a root too
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                # replace rev by its parents in the ancestor frontier
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2188
2193
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    # exact/prefix matches can use the fast filelog path unless --removed
    # is requested; any other pattern forces the slow changelog scan
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs
    elif not slowpath:
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise error.Abort(_('can only follow copies/renames for explicit '
                                'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                # memoized: 'set' holds confirmed hits, 'revs' holds
                # candidates not yet examined
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                # NOTE(review): 'wanted' may be a plain set, a smartset or
                # a lazywantedset here; '-' with a list presumably relies
                # on smartset subtraction -- confirm prune works on every
                # path
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and match.always():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                rev = next(it, None)
                if rev is None:
                    stopiteration = True
                    break
                elif want(rev):
                    nrevs.append(rev)
            # forward pass: let the caller gather data for the window...
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            # ...then yield the window's revisions in the requested order
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
2326
2331
def _makefollowlogfilematcher(repo, files, followfirst):
    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated by
    # reproducing the graph traversal already done by --follow revset
    # and relating revs to file names (which is not "correct" but
    # good enough).
    fcache = {}
    pctx = repo['.']
    # mutable cell so the closure can record completion (no 'nonlocal')
    populated = [False]

    def _populate():
        for fn in files:
            fctx = pctx[fn]
            fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
            for anc in fctx.ancestors(followfirst=followfirst):
                fcache.setdefault(anc.rev(), set()).add(anc.path())

    def filematcher(rev):
        # fill the cache lazily, on first use only
        if not populated[0]:
            populated[0] = True
            _populate()
        return scmutil.matchfiles(repo, fcache.get(rev, []))

    return filematcher
2354
2359
2355 def _makenofollowlogfilematcher(repo, pats, opts):
2360 def _makenofollowlogfilematcher(repo, pats, opts):
2356 '''hook for extensions to override the filematcher for non-follow cases'''
2361 '''hook for extensions to override the filematcher for non-follow cases'''
2357 return None
2362 return None
2358
2363
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # option name -> (revset template, join operator for list values)
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # copy so the synthetic '_'-prefixed entries below don't leak to callers
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behavior depends on revs...
    # peek at (up to) the first two revs to learn the range direction
    it = iter(revs)
    startrev = next(it)
    followdescendants = startrev < next(it, startrev)

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
                                   opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    # option names indexed by followfirst (and range direction)
    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # translate the collected options into revset fragments; sorted() keeps
    # the generated expression deterministic
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2510
2515
def _logrevs(repo, opts):
    """Return the default revision set for log, newest first.

    The default --rev value depends on --follow, while --follow behavior
    depends on the revisions resolved from --rev, hence this helper.
    """
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # following from the null revision: nothing to walk
            return smartset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
2525
2530
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    # User-specified revs might be unsorted, but don't sort before
    # _makelogrevset because it might depend on the order of revs
    if opts.get('rev') and not (revs.isdescending() or revs.istopo()):
        revs.sort(reverse=True)
    if expr:
        revs = revset.match(repo.ui, expr)(repo, revs)
    if maxcount is not None:
        # honour --limit by truncating the (possibly lazy) revision stream
        revs = smartset.baseset(list(itertools.islice(revs, maxcount)))

    return revs, expr, filematcher
2556
2561
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    maxcount = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return smartset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        revs = revset.match(repo.ui, expr)(repo, revs)
    if maxcount is not None:
        # honour --limit by truncating the (possibly lazy) revision stream
        revs = smartset.baseset(list(itertools.islice(revs, maxcount)))

    return revs, expr, filematcher
2582
2587
def _graphnodeformatter(ui, displayer):
    """Return a callable (repo, ctx) -> text rendering the graph node symbol.

    Honours the ui.graphnodetemplate config; without it, falls back to the
    stock {graphnode} keyword implementation.
    """
    spec = ui.config('ui', 'graphnodetemplate')
    if not spec:
        # fast path for the default "{graphnode}"
        return templatekw.showgraphnode

    tmpl = formatter.maketemplater(ui, templater.unquotestring(spec))
    if isinstance(displayer, changeset_templater):
        tmplcache = displayer.cache  # reuse cache of slow templates
    else:
        tmplcache = {}
    props = templatekw.keywords.copy()
    props.update({'templ': tmpl, 'cache': tmplcache})

    def formatnode(repo, ctx):
        props.update({'ctx': ctx,
                      'repo': repo,
                      'ui': repo.ui,
                      'revcache': {}})
        return tmpl.render(props)

    return formatnode
2603
2608
def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
                 filematcher=None, props=None):
    """Show revisions from *dag* as an ASCII revision graph.

    *dag* yields (rev, type, ctx, parents) tuples. *displayer* must be a
    buffered changeset displayer: each changeset is rendered into a hunk
    which is then interleaved with the graph columns produced by *edgefn*.
    *getrenamed*, if given, is called as getrenamed(fn, rev) to collect
    copy information per changeset. *filematcher*, if given, maps a rev
    to a match object restricting the files detailed for that revision.
    *props* holds extra keyword arguments forwarded to displayer.show().
    """
    props = props or {}
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state['styles']

    # only set graph styling if HGPLAIN is not set.
    if ui.plain('graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, '|'))
    else:
        edgetypes = {
            'parent': graphmod.PARENT,
            'grandparent': graphmod.GRANDPARENT,
            'missing': graphmod.MISSINGPARENT
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
                                    styles[key])
            if not styles[key]:
                # empty string disables that edge style entirely
                styles[key] = None

    # experimental config: experimental.graphshorten
    state['graphshorten'] = ui.configbool('experimental', 'graphshorten')

    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx)
        copies = None
        # ctx.rev() is falsy for the null/working ctx: no copies to trace
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        edges = edgefn(type, char, state, rev, parents)
        # pull the first edge eagerly: its width is needed by the template
        firstedge = next(edges)
        width = firstedge[2]
        displayer.show(ctx, copies=copies, matchfn=revmatchfn,
                       _graphwidth=width, **props)
        # pop the buffered changeset text and glue it to the graph columns
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        for type, char, width, coldata in itertools.chain([firstedge], edges):
            graphmod.ascii(ui, state, type, char, lines, coldata)
            # the changeset text is only attached to the first edge
            lines = []
    displayer.close()
2656
2661
def graphlog(ui, repo, pats, opts):
    """Run log with an ASCII revision graph (parameters match the log
    command)."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    revdag = graphmod.dagwalker(repo, revs)

    getrenamed = None
    if opts.get('copies'):
        # bound rename tracing to the highest requested revision, if any
        if opts.get('rev'):
            endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            endrev = None
        getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)

    ui.pager('log')
    displayer = show_changeset(ui, repo, opts, buffered=True)
    displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
                 filematcher)
2673
2678
def checkunsupportedgraphflags(pats, opts):
    """Abort if an option incompatible with -G/--graph was requested."""
    for badopt in ("newest_first",):
        if opts.get(badopt):
            raise error.Abort(_("-G/--graph option is incompatible with --%s")
                              % badopt.replace("_", "-"))
2679
2684
def graphrevs(repo, nodes, opts):
    """Reverse *nodes* in place, honour --limit, and build graph tuples."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2686
2691
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by *match* for addition, recursing into
    subrepositories; return the list of paths that could not be added."""
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    toadd = []
    wctx = repo[None]

    caseauditor = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        caseauditor = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # Walk the dirstate rather than wctx.walk, which would also return a
    # lot of clean files we aren't interested in and takes more time.
    walked = dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                           unknown=True, ignored=False, full=False)
    for f in sorted(walked):
        exact = match.exact(f)
        if not (exact or
                not explicitonly and f not in wctx and repo.wvfs.lexists(f)):
            continue
        if caseauditor:
            caseauditor(f)
        toadd.append(f)
        if ui.verbose or not exact:
            ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            # without --subrepos, nested repos only add explicit matches
            subexplicit = not opts.get(r'subrepos')
            bad.extend(sub.add(ui, submatch, prefix, subexplicit, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(toadd, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2729
2734
def addwebdirpath(repo, serverpath, webconf):
    """Register *repo*, and every subrepo it ever had, under *serverpath*
    in the hgweb configuration mapping *webconf*."""
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    # any revision touching .hgsub may introduce a subrepository
    for rev in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2738
2743
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking the files matched by *match*; return (bad, forgot)."""
    join = lambda f: os.path.join(prefix, f)
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    candidates = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        candidates = [f for f in candidates if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend(subpath + '/' + f for f in subbad)
            forgot.extend(subpath + '/' + f for f in subforgot)
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        for f in match.files():
            if f in repo.dirstate or repo.wvfs.isdir(f) or f in forgot:
                continue
            if repo.wvfs.exists(f):
                # Don't complain if the exact case match wasn't given.
                # But don't do this until after checking 'forgot', so
                # that subrepo files aren't normalized, and this op is
                # purely from data cached by the status walk above.
                if repo.dirstate.normalize(f) in repo.dirstate:
                    continue
                ui.warn(_('not removing %s: '
                          'file is already untracked\n')
                        % match.rel(f))
            bad.append(f)

    for f in candidates:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(candidates, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in candidates if f not in rejected)
    return bad, forgot
2786
2791
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the files of *ctx* matched by *m* through formatter *fm*;
    return 0 when anything matched, 1 otherwise."""
    rev = ctx.rev()
    ret = 1
    dstate = ctx.repo().dirstate

    for f in ctx.matches(m):
        # in the working context, skip files marked for removal
        if rev is None and dstate[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fctx = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fctx.size(), fctx.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if not (subrepos or m.exact(subpath) or any(submatch.files())):
            continue
        sub = ctx.sub(subpath)
        try:
            recurse = m.exact(subpath) or subrepos
            if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
                ret = 0
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % m.abs(subpath))

    return ret
2816
2821
def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
    """Remove the files matched by *m* from tracking (and disk unless
    --after), recursing into subrepositories.

    *after* records deletions that already happened on disk; *force*
    removes even modified/added files. *warnings* accumulates warning
    strings; when None (top-level call) they are emitted at the end,
    otherwise the caller (an outer remove) owns them. Returns 1 if any
    warning was issued, 0 otherwise.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    # only the outermost call (warnings is None) prints the warnings
    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    total = len(subs)
    count = 0
    for subpath in subs:
        count += 1
        submatch = matchmod.subdirmatcher(subpath, m)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, prefix, after, force, subrepos,
                                   warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                               % join(subpath))
    ui.progress(_('searching'), None)

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    total = len(files)
    count = 0
    for f in files:
        # is f inside one of the working copy's subrepositories?
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        count += 1
        ui.progress(_('deleting'), count, total=total, unit=_('files'))
        isdir = f in deleteddirs or wctx.hasdir(f)
        # tracked files, directories, '.' and subrepo paths need no warning
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % m.rel(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % m.rel(f))
            # missing files will generate a warning elsewhere
            ret = 1
    ui.progress(_('deleting'), None)

    # select which of the status buckets actually get removed
    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after only records files already gone from disk
        list = deleted
        remaining = modified + added + clean
        total = len(remaining)
        count = 0
        for f in remaining:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file still exists\n')
                            % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)
    else:
        list = deleted + clean
        total = len(modified) + len(added)
        count = 0
        for f in modified:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            count += 1
            ui.progress(_('skipping'), count, total=total, unit=_('files'))
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % m.rel(f))
            ret = 1
        ui.progress(_('skipping'), None)

    list = sorted(list)
    total = len(list)
    count = 0
    for f in list:
        count += 1
        if ui.verbose or not m.exact(f):
            ui.progress(_('deleting'), count, total=total, unit=_('files'))
            ui.status(_('removing %s\n') % m.rel(f))
    ui.progress(_('deleting'), None)

    with repo.wlock():
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                repo.wvfs.unlinkpath(f, ignoremissing=True)
        repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2934
2939
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the data of the files matched by *matcher* in *ctx*,
    recursing into subrepositories; return 0 if anything was written."""
    retcode = 1

    def emit(path):
        filename = None
        if fntemplate:
            filename = makefilename(repo, fntemplate, ctx.node(),
                                    pathname=os.path.join(prefix, path))
        with formatter.maybereopen(basefm, filename, opts) as fm:
            data = ctx[path].data()
            if opts.get('decode'):
                data = repo.wwritedata(path, data)
            fm.startitem()
            fm.write('data', '%s', data)
            fm.data(abspath=path, path=matcher.rel(path))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        target = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(target)[0]:
                emit(target)
                return 0
        except KeyError:
            pass

    for abs in ctx.walk(matcher):
        emit(abs)
        retcode = 0

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            if not sub.cat(submatch, basefm, fntemplate,
                           os.path.join(prefix, sub._path), **opts):
                retcode = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return retcode
2981
2986
2982 def commit(ui, repo, commitfunc, pats, opts):
2987 def commit(ui, repo, commitfunc, pats, opts):
2983 '''commit the specified files or all outstanding changes'''
2988 '''commit the specified files or all outstanding changes'''
2984 date = opts.get('date')
2989 date = opts.get('date')
2985 if date:
2990 if date:
2986 opts['date'] = util.parsedate(date)
2991 opts['date'] = util.parsedate(date)
2987 message = logmessage(ui, opts)
2992 message = logmessage(ui, opts)
2988 matcher = scmutil.match(repo[None], pats, opts)
2993 matcher = scmutil.match(repo[None], pats, opts)
2989
2994
2990 dsguard = None
2995 dsguard = None
2991 # extract addremove carefully -- this function can be called from a command
2996 # extract addremove carefully -- this function can be called from a command
2992 # that doesn't support addremove
2997 # that doesn't support addremove
2993 if opts.get('addremove'):
2998 if opts.get('addremove'):
2994 dsguard = dirstateguard.dirstateguard(repo, 'commit')
2999 dsguard = dirstateguard.dirstateguard(repo, 'commit')
2995 with dsguard or util.nullcontextmanager():
3000 with dsguard or util.nullcontextmanager():
2996 if dsguard:
3001 if dsguard:
2997 if scmutil.addremove(repo, matcher, "", opts) != 0:
3002 if scmutil.addremove(repo, matcher, "", opts) != 0:
2998 raise error.Abort(
3003 raise error.Abort(
2999 _("failed to mark all new/missing files as added/removed"))
3004 _("failed to mark all new/missing files as added/removed"))
3000
3005
3001 return commitfunc(ui, repo, message, matcher, opts)
3006 return commitfunc(ui, repo, message, matcher, opts)
3002
3007
3003 def samefile(f, ctx1, ctx2):
3008 def samefile(f, ctx1, ctx2):
3004 if f in ctx1.manifest():
3009 if f in ctx1.manifest():
3005 a = ctx1.filectx(f)
3010 a = ctx1.filectx(f)
3006 if f in ctx2.manifest():
3011 if f in ctx2.manifest():
3007 b = ctx2.filectx(f)
3012 b = ctx2.filectx(f)
3008 return (not a.cmp(b)
3013 return (not a.cmp(b)
3009 and a.flags() == b.flags())
3014 and a.flags() == b.flags())
3010 else:
3015 else:
3011 return False
3016 return False
3012 else:
3017 else:
3013 return f not in ctx2.manifest()
3018 return f not in ctx2.manifest()
3014
3019
3015 def amend(ui, repo, old, extra, pats, opts):
3020 def amend(ui, repo, old, extra, pats, opts):
3016 # avoid cycle context -> subrepo -> cmdutil
3021 # avoid cycle context -> subrepo -> cmdutil
3017 from . import context
3022 from . import context
3018
3023
3019 # amend will reuse the existing user if not specified, but the obsolete
3024 # amend will reuse the existing user if not specified, but the obsolete
3020 # marker creation requires that the current user's name is specified.
3025 # marker creation requires that the current user's name is specified.
3021 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3026 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3022 ui.username() # raise exception if username not set
3027 ui.username() # raise exception if username not set
3023
3028
3024 ui.note(_('amending changeset %s\n') % old)
3029 ui.note(_('amending changeset %s\n') % old)
3025 base = old.p1()
3030 base = old.p1()
3026
3031
3027 with repo.wlock(), repo.lock(), repo.transaction('amend'):
3032 with repo.wlock(), repo.lock(), repo.transaction('amend'):
3028 # Participating changesets:
3033 # Participating changesets:
3029 #
3034 #
3030 # wctx o - workingctx that contains changes from working copy
3035 # wctx o - workingctx that contains changes from working copy
3031 # | to go into amending commit
3036 # | to go into amending commit
3032 # |
3037 # |
3033 # old o - changeset to amend
3038 # old o - changeset to amend
3034 # |
3039 # |
3035 # base o - first parent of the changeset to amend
3040 # base o - first parent of the changeset to amend
3036 wctx = repo[None]
3041 wctx = repo[None]
3037
3042
3038 # Update extra dict from amended commit (e.g. to preserve graft
3043 # Update extra dict from amended commit (e.g. to preserve graft
3039 # source)
3044 # source)
3040 extra.update(old.extra())
3045 extra.update(old.extra())
3041
3046
3042 # Also update it from the from the wctx
3047 # Also update it from the from the wctx
3043 extra.update(wctx.extra())
3048 extra.update(wctx.extra())
3044
3049
3045 user = opts.get('user') or old.user()
3050 user = opts.get('user') or old.user()
3046 date = opts.get('date') or old.date()
3051 date = opts.get('date') or old.date()
3047
3052
3048 # Parse the date to allow comparison between date and old.date()
3053 # Parse the date to allow comparison between date and old.date()
3049 date = util.parsedate(date)
3054 date = util.parsedate(date)
3050
3055
3051 if len(old.parents()) > 1:
3056 if len(old.parents()) > 1:
3052 # ctx.files() isn't reliable for merges, so fall back to the
3057 # ctx.files() isn't reliable for merges, so fall back to the
3053 # slower repo.status() method
3058 # slower repo.status() method
3054 files = set([fn for st in repo.status(base, old)[:3]
3059 files = set([fn for st in repo.status(base, old)[:3]
3055 for fn in st])
3060 for fn in st])
3056 else:
3061 else:
3057 files = set(old.files())
3062 files = set(old.files())
3058
3063
3059 # add/remove the files to the working copy if the "addremove" option
3064 # add/remove the files to the working copy if the "addremove" option
3060 # was specified.
3065 # was specified.
3061 matcher = scmutil.match(wctx, pats, opts)
3066 matcher = scmutil.match(wctx, pats, opts)
3062 if (opts.get('addremove')
3067 if (opts.get('addremove')
3063 and scmutil.addremove(repo, matcher, "", opts)):
3068 and scmutil.addremove(repo, matcher, "", opts)):
3064 raise error.Abort(
3069 raise error.Abort(
3065 _("failed to mark all new/missing files as added/removed"))
3070 _("failed to mark all new/missing files as added/removed"))
3066
3071
3067 filestoamend = set(f for f in wctx.files() if matcher(f))
3072 filestoamend = set(f for f in wctx.files() if matcher(f))
3068
3073
3069 changes = (len(filestoamend) > 0)
3074 changes = (len(filestoamend) > 0)
3070 if changes:
3075 if changes:
3071 # Recompute copies (avoid recording a -> b -> a)
3076 # Recompute copies (avoid recording a -> b -> a)
3072 copied = copies.pathcopies(base, wctx, matcher)
3077 copied = copies.pathcopies(base, wctx, matcher)
3073 if old.p2:
3078 if old.p2:
3074 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3079 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3075
3080
3076 # Prune files which were reverted by the updates: if old
3081 # Prune files which were reverted by the updates: if old
3077 # introduced file X and the file was renamed in the working
3082 # introduced file X and the file was renamed in the working
3078 # copy, then those two files are the same and
3083 # copy, then those two files are the same and
3079 # we can discard X from our list of files. Likewise if X
3084 # we can discard X from our list of files. Likewise if X
3080 # was deleted, it's no longer relevant
3085 # was deleted, it's no longer relevant
3081 files.update(filestoamend)
3086 files.update(filestoamend)
3082 files = [f for f in files if not samefile(f, wctx, base)]
3087 files = [f for f in files if not samefile(f, wctx, base)]
3083
3088
3084 def filectxfn(repo, ctx_, path):
3089 def filectxfn(repo, ctx_, path):
3085 try:
3090 try:
3086 # If the file being considered is not amongst the files
3091 # If the file being considered is not amongst the files
3087 # to be amended, we should return the file context from the
3092 # to be amended, we should return the file context from the
3088 # old changeset. This avoids issues when only some files in
3093 # old changeset. This avoids issues when only some files in
3089 # the working copy are being amended but there are also
3094 # the working copy are being amended but there are also
3090 # changes to other files from the old changeset.
3095 # changes to other files from the old changeset.
3091 if path not in filestoamend:
3096 if path not in filestoamend:
3092 return old.filectx(path)
3097 return old.filectx(path)
3093
3098
3094 fctx = wctx[path]
3099 fctx = wctx[path]
3095
3100
3096 # Return None for removed files.
3101 # Return None for removed files.
3097 if not fctx.exists():
3102 if not fctx.exists():
3098 return None
3103 return None
3099
3104
3100 flags = fctx.flags()
3105 flags = fctx.flags()
3101 mctx = context.memfilectx(repo,
3106 mctx = context.memfilectx(repo,
3102 fctx.path(), fctx.data(),
3107 fctx.path(), fctx.data(),
3103 islink='l' in flags,
3108 islink='l' in flags,
3104 isexec='x' in flags,
3109 isexec='x' in flags,
3105 copied=copied.get(path))
3110 copied=copied.get(path))
3106 return mctx
3111 return mctx
3107 except KeyError:
3112 except KeyError:
3108 return None
3113 return None
3109 else:
3114 else:
3110 ui.note(_('copying changeset %s to %s\n') % (old, base))
3115 ui.note(_('copying changeset %s to %s\n') % (old, base))
3111
3116
3112 # Use version of files as in the old cset
3117 # Use version of files as in the old cset
3113 def filectxfn(repo, ctx_, path):
3118 def filectxfn(repo, ctx_, path):
3114 try:
3119 try:
3115 return old.filectx(path)
3120 return old.filectx(path)
3116 except KeyError:
3121 except KeyError:
3117 return None
3122 return None
3118
3123
3119 # See if we got a message from -m or -l, if not, open the editor with
3124 # See if we got a message from -m or -l, if not, open the editor with
3120 # the message of the changeset to amend.
3125 # the message of the changeset to amend.
3121 message = logmessage(ui, opts)
3126 message = logmessage(ui, opts)
3122
3127
3123 editform = mergeeditform(old, 'commit.amend')
3128 editform = mergeeditform(old, 'commit.amend')
3124 editor = getcommiteditor(editform=editform,
3129 editor = getcommiteditor(editform=editform,
3125 **pycompat.strkwargs(opts))
3130 **pycompat.strkwargs(opts))
3126
3131
3127 if not message:
3132 if not message:
3128 editor = getcommiteditor(edit=True, editform=editform)
3133 editor = getcommiteditor(edit=True, editform=editform)
3129 message = old.description()
3134 message = old.description()
3130
3135
3131 pureextra = extra.copy()
3136 pureextra = extra.copy()
3132 extra['amend_source'] = old.hex()
3137 extra['amend_source'] = old.hex()
3133
3138
3134 new = context.memctx(repo,
3139 new = context.memctx(repo,
3135 parents=[base.node(), old.p2().node()],
3140 parents=[base.node(), old.p2().node()],
3136 text=message,
3141 text=message,
3137 files=files,
3142 files=files,
3138 filectxfn=filectxfn,
3143 filectxfn=filectxfn,
3139 user=user,
3144 user=user,
3140 date=date,
3145 date=date,
3141 extra=extra,
3146 extra=extra,
3142 editor=editor)
3147 editor=editor)
3143
3148
3144 newdesc = changelog.stripdesc(new.description())
3149 newdesc = changelog.stripdesc(new.description())
3145 if ((not changes)
3150 if ((not changes)
3146 and newdesc == old.description()
3151 and newdesc == old.description()
3147 and user == old.user()
3152 and user == old.user()
3148 and date == old.date()
3153 and date == old.date()
3149 and pureextra == old.extra()):
3154 and pureextra == old.extra()):
3150 # nothing changed. continuing here would create a new node
3155 # nothing changed. continuing here would create a new node
3151 # anyway because of the amend_source noise.
3156 # anyway because of the amend_source noise.
3152 #
3157 #
3153 # This not what we expect from amend.
3158 # This not what we expect from amend.
3154 return old.node()
3159 return old.node()
3155
3160
3156 if opts.get('secret'):
3161 if opts.get('secret'):
3157 commitphase = 'secret'
3162 commitphase = 'secret'
3158 else:
3163 else:
3159 commitphase = old.phase()
3164 commitphase = old.phase()
3160 overrides = {('phases', 'new-commit'): commitphase}
3165 overrides = {('phases', 'new-commit'): commitphase}
3161 with ui.configoverride(overrides, 'amend'):
3166 with ui.configoverride(overrides, 'amend'):
3162 newid = repo.commitctx(new)
3167 newid = repo.commitctx(new)
3163
3168
3164 # Reroute the working copy parent to the new changeset
3169 # Reroute the working copy parent to the new changeset
3165 repo.setparents(newid, nullid)
3170 repo.setparents(newid, nullid)
3166 mapping = {old.node(): (newid,)}
3171 mapping = {old.node(): (newid,)}
3167 obsmetadata = None
3172 obsmetadata = None
3168 if opts.get('note'):
3173 if opts.get('note'):
3169 obsmetadata = {'note': opts['note']}
3174 obsmetadata = {'note': opts['note']}
3170 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
3175 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
3171
3176
3172 # Fixing the dirstate because localrepo.commitctx does not update
3177 # Fixing the dirstate because localrepo.commitctx does not update
3173 # it. This is rather convenient because we did not need to update
3178 # it. This is rather convenient because we did not need to update
3174 # the dirstate for all the files in the new commit which commitctx
3179 # the dirstate for all the files in the new commit which commitctx
3175 # could have done if it updated the dirstate. Now, we can
3180 # could have done if it updated the dirstate. Now, we can
3176 # selectively update the dirstate only for the amended files.
3181 # selectively update the dirstate only for the amended files.
3177 dirstate = repo.dirstate
3182 dirstate = repo.dirstate
3178
3183
3179 # Update the state of the files which were added and
3184 # Update the state of the files which were added and
3180 # and modified in the amend to "normal" in the dirstate.
3185 # and modified in the amend to "normal" in the dirstate.
3181 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3186 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3182 for f in normalfiles:
3187 for f in normalfiles:
3183 dirstate.normal(f)
3188 dirstate.normal(f)
3184
3189
3185 # Update the state of files which were removed in the amend
3190 # Update the state of files which were removed in the amend
3186 # to "removed" in the dirstate.
3191 # to "removed" in the dirstate.
3187 removedfiles = set(wctx.removed()) & filestoamend
3192 removedfiles = set(wctx.removed()) & filestoamend
3188 for f in removedfiles:
3193 for f in removedfiles:
3189 dirstate.drop(f)
3194 dirstate.drop(f)
3190
3195
3191 return newid
3196 return newid
3192
3197
3193 def commiteditor(repo, ctx, subs, editform=''):
3198 def commiteditor(repo, ctx, subs, editform=''):
3194 if ctx.description():
3199 if ctx.description():
3195 return ctx.description()
3200 return ctx.description()
3196 return commitforceeditor(repo, ctx, subs, editform=editform,
3201 return commitforceeditor(repo, ctx, subs, editform=editform,
3197 unchangedmessagedetection=True)
3202 unchangedmessagedetection=True)
3198
3203
3199 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
3204 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
3200 editform='', unchangedmessagedetection=False):
3205 editform='', unchangedmessagedetection=False):
3201 if not extramsg:
3206 if not extramsg:
3202 extramsg = _("Leave message empty to abort commit.")
3207 extramsg = _("Leave message empty to abort commit.")
3203
3208
3204 forms = [e for e in editform.split('.') if e]
3209 forms = [e for e in editform.split('.') if e]
3205 forms.insert(0, 'changeset')
3210 forms.insert(0, 'changeset')
3206 templatetext = None
3211 templatetext = None
3207 while forms:
3212 while forms:
3208 ref = '.'.join(forms)
3213 ref = '.'.join(forms)
3209 if repo.ui.config('committemplate', ref):
3214 if repo.ui.config('committemplate', ref):
3210 templatetext = committext = buildcommittemplate(
3215 templatetext = committext = buildcommittemplate(
3211 repo, ctx, subs, extramsg, ref)
3216 repo, ctx, subs, extramsg, ref)
3212 break
3217 break
3213 forms.pop()
3218 forms.pop()
3214 else:
3219 else:
3215 committext = buildcommittext(repo, ctx, subs, extramsg)
3220 committext = buildcommittext(repo, ctx, subs, extramsg)
3216
3221
3217 # run editor in the repository root
3222 # run editor in the repository root
3218 olddir = pycompat.getcwd()
3223 olddir = pycompat.getcwd()
3219 os.chdir(repo.root)
3224 os.chdir(repo.root)
3220
3225
3221 # make in-memory changes visible to external process
3226 # make in-memory changes visible to external process
3222 tr = repo.currenttransaction()
3227 tr = repo.currenttransaction()
3223 repo.dirstate.write(tr)
3228 repo.dirstate.write(tr)
3224 pending = tr and tr.writepending() and repo.root
3229 pending = tr and tr.writepending() and repo.root
3225
3230
3226 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
3231 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
3227 editform=editform, pending=pending,
3232 editform=editform, pending=pending,
3228 repopath=repo.path, action='commit')
3233 repopath=repo.path, action='commit')
3229 text = editortext
3234 text = editortext
3230
3235
3231 # strip away anything below this special string (used for editors that want
3236 # strip away anything below this special string (used for editors that want
3232 # to display the diff)
3237 # to display the diff)
3233 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3238 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3234 if stripbelow:
3239 if stripbelow:
3235 text = text[:stripbelow.start()]
3240 text = text[:stripbelow.start()]
3236
3241
3237 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
3242 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
3238 os.chdir(olddir)
3243 os.chdir(olddir)
3239
3244
3240 if finishdesc:
3245 if finishdesc:
3241 text = finishdesc(text)
3246 text = finishdesc(text)
3242 if not text.strip():
3247 if not text.strip():
3243 raise error.Abort(_("empty commit message"))
3248 raise error.Abort(_("empty commit message"))
3244 if unchangedmessagedetection and editortext == templatetext:
3249 if unchangedmessagedetection and editortext == templatetext:
3245 raise error.Abort(_("commit message unchanged"))
3250 raise error.Abort(_("commit message unchanged"))
3246
3251
3247 return text
3252 return text
3248
3253
3249 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3254 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3250 ui = repo.ui
3255 ui = repo.ui
3251 spec = formatter.templatespec(ref, None, None)
3256 spec = formatter.templatespec(ref, None, None)
3252 t = changeset_templater(ui, repo, spec, None, {}, False)
3257 t = changeset_templater(ui, repo, spec, None, {}, False)
3253 t.t.cache.update((k, templater.unquotestring(v))
3258 t.t.cache.update((k, templater.unquotestring(v))
3254 for k, v in repo.ui.configitems('committemplate'))
3259 for k, v in repo.ui.configitems('committemplate'))
3255
3260
3256 if not extramsg:
3261 if not extramsg:
3257 extramsg = '' # ensure that extramsg is string
3262 extramsg = '' # ensure that extramsg is string
3258
3263
3259 ui.pushbuffer()
3264 ui.pushbuffer()
3260 t.show(ctx, extramsg=extramsg)
3265 t.show(ctx, extramsg=extramsg)
3261 return ui.popbuffer()
3266 return ui.popbuffer()
3262
3267
3263 def hgprefix(msg):
3268 def hgprefix(msg):
3264 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
3269 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
3265
3270
3266 def buildcommittext(repo, ctx, subs, extramsg):
3271 def buildcommittext(repo, ctx, subs, extramsg):
3267 edittext = []
3272 edittext = []
3268 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3273 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3269 if ctx.description():
3274 if ctx.description():
3270 edittext.append(ctx.description())
3275 edittext.append(ctx.description())
3271 edittext.append("")
3276 edittext.append("")
3272 edittext.append("") # Empty line between message and comments.
3277 edittext.append("") # Empty line between message and comments.
3273 edittext.append(hgprefix(_("Enter commit message."
3278 edittext.append(hgprefix(_("Enter commit message."
3274 " Lines beginning with 'HG:' are removed.")))
3279 " Lines beginning with 'HG:' are removed.")))
3275 edittext.append(hgprefix(extramsg))
3280 edittext.append(hgprefix(extramsg))
3276 edittext.append("HG: --")
3281 edittext.append("HG: --")
3277 edittext.append(hgprefix(_("user: %s") % ctx.user()))
3282 edittext.append(hgprefix(_("user: %s") % ctx.user()))
3278 if ctx.p2():
3283 if ctx.p2():
3279 edittext.append(hgprefix(_("branch merge")))
3284 edittext.append(hgprefix(_("branch merge")))
3280 if ctx.branch():
3285 if ctx.branch():
3281 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
3286 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
3282 if bookmarks.isactivewdirparent(repo):
3287 if bookmarks.isactivewdirparent(repo):
3283 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
3288 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
3284 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
3289 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
3285 edittext.extend([hgprefix(_("added %s") % f) for f in added])
3290 edittext.extend([hgprefix(_("added %s") % f) for f in added])
3286 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
3291 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
3287 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
3292 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
3288 if not added and not modified and not removed:
3293 if not added and not modified and not removed:
3289 edittext.append(hgprefix(_("no files changed")))
3294 edittext.append(hgprefix(_("no files changed")))
3290 edittext.append("")
3295 edittext.append("")
3291
3296
3292 return "\n".join(edittext)
3297 return "\n".join(edittext)
3293
3298
3294 def commitstatus(repo, node, branch, bheads=None, opts=None):
3299 def commitstatus(repo, node, branch, bheads=None, opts=None):
3295 if opts is None:
3300 if opts is None:
3296 opts = {}
3301 opts = {}
3297 ctx = repo[node]
3302 ctx = repo[node]
3298 parents = ctx.parents()
3303 parents = ctx.parents()
3299
3304
3300 if (not opts.get('amend') and bheads and node not in bheads and not
3305 if (not opts.get('amend') and bheads and node not in bheads and not
3301 [x for x in parents if x.node() in bheads and x.branch() == branch]):
3306 [x for x in parents if x.node() in bheads and x.branch() == branch]):
3302 repo.ui.status(_('created new head\n'))
3307 repo.ui.status(_('created new head\n'))
3303 # The message is not printed for initial roots. For the other
3308 # The message is not printed for initial roots. For the other
3304 # changesets, it is printed in the following situations:
3309 # changesets, it is printed in the following situations:
3305 #
3310 #
3306 # Par column: for the 2 parents with ...
3311 # Par column: for the 2 parents with ...
3307 # N: null or no parent
3312 # N: null or no parent
3308 # B: parent is on another named branch
3313 # B: parent is on another named branch
3309 # C: parent is a regular non head changeset
3314 # C: parent is a regular non head changeset
3310 # H: parent was a branch head of the current branch
3315 # H: parent was a branch head of the current branch
3311 # Msg column: whether we print "created new head" message
3316 # Msg column: whether we print "created new head" message
3312 # In the following, it is assumed that there already exists some
3317 # In the following, it is assumed that there already exists some
3313 # initial branch heads of the current branch, otherwise nothing is
3318 # initial branch heads of the current branch, otherwise nothing is
3314 # printed anyway.
3319 # printed anyway.
3315 #
3320 #
3316 # Par Msg Comment
3321 # Par Msg Comment
3317 # N N y additional topo root
3322 # N N y additional topo root
3318 #
3323 #
3319 # B N y additional branch root
3324 # B N y additional branch root
3320 # C N y additional topo head
3325 # C N y additional topo head
3321 # H N n usual case
3326 # H N n usual case
3322 #
3327 #
3323 # B B y weird additional branch root
3328 # B B y weird additional branch root
3324 # C B y branch merge
3329 # C B y branch merge
3325 # H B n merge with named branch
3330 # H B n merge with named branch
3326 #
3331 #
3327 # C C y additional head from merge
3332 # C C y additional head from merge
3328 # C H n merge with a head
3333 # C H n merge with a head
3329 #
3334 #
3330 # H H n head merge: head count decreases
3335 # H H n head merge: head count decreases
3331
3336
3332 if not opts.get('close_branch'):
3337 if not opts.get('close_branch'):
3333 for r in parents:
3338 for r in parents:
3334 if r.closesbranch() and r.branch() == branch:
3339 if r.closesbranch() and r.branch() == branch:
3335 repo.ui.status(_('reopening closed branch head %d\n') % r)
3340 repo.ui.status(_('reopening closed branch head %d\n') % r)
3336
3341
3337 if repo.ui.debugflag:
3342 if repo.ui.debugflag:
3338 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
3343 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
3339 elif repo.ui.verbose:
3344 elif repo.ui.verbose:
3340 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3345 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3341
3346
3342 def postcommitstatus(repo, pats, opts):
3347 def postcommitstatus(repo, pats, opts):
3343 return repo.status(match=scmutil.match(repo[None], pats, opts))
3348 return repo.status(match=scmutil.match(repo[None], pats, opts))
3344
3349
3345 def revert(ui, repo, ctx, parents, *pats, **opts):
3350 def revert(ui, repo, ctx, parents, *pats, **opts):
3346 parent, p2 = parents
3351 parent, p2 = parents
3347 node = ctx.node()
3352 node = ctx.node()
3348
3353
3349 mf = ctx.manifest()
3354 mf = ctx.manifest()
3350 if node == p2:
3355 if node == p2:
3351 parent = p2
3356 parent = p2
3352
3357
3353 # need all matching names in dirstate and manifest of target rev,
3358 # need all matching names in dirstate and manifest of target rev,
3354 # so have to walk both. do not print errors if files exist in one
3359 # so have to walk both. do not print errors if files exist in one
3355 # but not other. in both cases, filesets should be evaluated against
3360 # but not other. in both cases, filesets should be evaluated against
3356 # workingctx to get consistent result (issue4497). this means 'set:**'
3361 # workingctx to get consistent result (issue4497). this means 'set:**'
3357 # cannot be used to select missing files from target rev.
3362 # cannot be used to select missing files from target rev.
3358
3363
3359 # `names` is a mapping for all elements in working copy and target revision
3364 # `names` is a mapping for all elements in working copy and target revision
3360 # The mapping is in the form:
3365 # The mapping is in the form:
3361 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3366 # <asb path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3362 names = {}
3367 names = {}
3363
3368
3364 with repo.wlock():
3369 with repo.wlock():
3365 ## filling of the `names` mapping
3370 ## filling of the `names` mapping
3366 # walk dirstate to fill `names`
3371 # walk dirstate to fill `names`
3367
3372
3368 interactive = opts.get('interactive', False)
3373 interactive = opts.get('interactive', False)
3369 wctx = repo[None]
3374 wctx = repo[None]
3370 m = scmutil.match(wctx, pats, opts)
3375 m = scmutil.match(wctx, pats, opts)
3371
3376
3372 # we'll need this later
3377 # we'll need this later
3373 targetsubs = sorted(s for s in wctx.substate if m(s))
3378 targetsubs = sorted(s for s in wctx.substate if m(s))
3374
3379
3375 if not m.always():
3380 if not m.always():
3376 matcher = matchmod.badmatch(m, lambda x, y: False)
3381 matcher = matchmod.badmatch(m, lambda x, y: False)
3377 for abs in wctx.walk(matcher):
3382 for abs in wctx.walk(matcher):
3378 names[abs] = m.rel(abs), m.exact(abs)
3383 names[abs] = m.rel(abs), m.exact(abs)
3379
3384
3380 # walk target manifest to fill `names`
3385 # walk target manifest to fill `names`
3381
3386
3382 def badfn(path, msg):
3387 def badfn(path, msg):
3383 if path in names:
3388 if path in names:
3384 return
3389 return
3385 if path in ctx.substate:
3390 if path in ctx.substate:
3386 return
3391 return
3387 path_ = path + '/'
3392 path_ = path + '/'
3388 for f in names:
3393 for f in names:
3389 if f.startswith(path_):
3394 if f.startswith(path_):
3390 return
3395 return
3391 ui.warn("%s: %s\n" % (m.rel(path), msg))
3396 ui.warn("%s: %s\n" % (m.rel(path), msg))
3392
3397
3393 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3398 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3394 if abs not in names:
3399 if abs not in names:
3395 names[abs] = m.rel(abs), m.exact(abs)
3400 names[abs] = m.rel(abs), m.exact(abs)
3396
3401
3397 # Find status of all file in `names`.
3402 # Find status of all file in `names`.
3398 m = scmutil.matchfiles(repo, names)
3403 m = scmutil.matchfiles(repo, names)
3399
3404
3400 changes = repo.status(node1=node, match=m,
3405 changes = repo.status(node1=node, match=m,
3401 unknown=True, ignored=True, clean=True)
3406 unknown=True, ignored=True, clean=True)
3402 else:
3407 else:
3403 changes = repo.status(node1=node, match=m)
3408 changes = repo.status(node1=node, match=m)
3404 for kind in changes:
3409 for kind in changes:
3405 for abs in kind:
3410 for abs in kind:
3406 names[abs] = m.rel(abs), m.exact(abs)
3411 names[abs] = m.rel(abs), m.exact(abs)
3407
3412
3408 m = scmutil.matchfiles(repo, names)
3413 m = scmutil.matchfiles(repo, names)
3409
3414
3410 modified = set(changes.modified)
3415 modified = set(changes.modified)
3411 added = set(changes.added)
3416 added = set(changes.added)
3412 removed = set(changes.removed)
3417 removed = set(changes.removed)
3413 _deleted = set(changes.deleted)
3418 _deleted = set(changes.deleted)
3414 unknown = set(changes.unknown)
3419 unknown = set(changes.unknown)
3415 unknown.update(changes.ignored)
3420 unknown.update(changes.ignored)
3416 clean = set(changes.clean)
3421 clean = set(changes.clean)
3417 modadded = set()
3422 modadded = set()
3418
3423
3419 # We need to account for the state of the file in the dirstate,
3424 # We need to account for the state of the file in the dirstate,
3420 # even when we revert against something else than parent. This will
3425 # even when we revert against something else than parent. This will
3421 # slightly alter the behavior of revert (doing back up or not, delete
3426 # slightly alter the behavior of revert (doing back up or not, delete
3422 # or just forget etc).
3427 # or just forget etc).
3423 if parent == node:
3428 if parent == node:
3424 dsmodified = modified
3429 dsmodified = modified
3425 dsadded = added
3430 dsadded = added
3426 dsremoved = removed
3431 dsremoved = removed
3427 # store all local modifications, useful later for rename detection
3432 # store all local modifications, useful later for rename detection
3428 localchanges = dsmodified | dsadded
3433 localchanges = dsmodified | dsadded
3429 modified, added, removed = set(), set(), set()
3434 modified, added, removed = set(), set(), set()
3430 else:
3435 else:
3431 changes = repo.status(node1=parent, match=m)
3436 changes = repo.status(node1=parent, match=m)
3432 dsmodified = set(changes.modified)
3437 dsmodified = set(changes.modified)
3433 dsadded = set(changes.added)
3438 dsadded = set(changes.added)
3434 dsremoved = set(changes.removed)
3439 dsremoved = set(changes.removed)
3435 # store all local modifications, useful later for rename detection
3440 # store all local modifications, useful later for rename detection
3436 localchanges = dsmodified | dsadded
3441 localchanges = dsmodified | dsadded
3437
3442
3438 # only take into account for removes between wc and target
3443 # only take into account for removes between wc and target
3439 clean |= dsremoved - removed
3444 clean |= dsremoved - removed
3440 dsremoved &= removed
3445 dsremoved &= removed
3441 # distinct between dirstate remove and other
3446 # distinct between dirstate remove and other
3442 removed -= dsremoved
3447 removed -= dsremoved
3443
3448
3444 modadded = added & dsmodified
3449 modadded = added & dsmodified
3445 added -= modadded
3450 added -= modadded
3446
3451
3447 # tell newly modified apart.
3452 # tell newly modified apart.
3448 dsmodified &= modified
3453 dsmodified &= modified
3449 dsmodified |= modified & dsadded # dirstate added may need backup
3454 dsmodified |= modified & dsadded # dirstate added may need backup
3450 modified -= dsmodified
3455 modified -= dsmodified
3451
3456
3452 # We need to wait for some post-processing to update this set
3457 # We need to wait for some post-processing to update this set
3453 # before making the distinction. The dirstate will be used for
3458 # before making the distinction. The dirstate will be used for
3454 # that purpose.
3459 # that purpose.
3455 dsadded = added
3460 dsadded = added
3456
3461
3457 # in case of merge, files that are actually added can be reported as
3462 # in case of merge, files that are actually added can be reported as
3458 # modified, we need to post process the result
3463 # modified, we need to post process the result
3459 if p2 != nullid:
3464 if p2 != nullid:
3460 mergeadd = set(dsmodified)
3465 mergeadd = set(dsmodified)
3461 for path in dsmodified:
3466 for path in dsmodified:
3462 if path in mf:
3467 if path in mf:
3463 mergeadd.remove(path)
3468 mergeadd.remove(path)
3464 dsadded |= mergeadd
3469 dsadded |= mergeadd
3465 dsmodified -= mergeadd
3470 dsmodified -= mergeadd
3466
3471
3467 # if f is a rename, update `names` to also revert the source
3472 # if f is a rename, update `names` to also revert the source
3468 cwd = repo.getcwd()
3473 cwd = repo.getcwd()
3469 for f in localchanges:
3474 for f in localchanges:
3470 src = repo.dirstate.copied(f)
3475 src = repo.dirstate.copied(f)
3471 # XXX should we check for rename down to target node?
3476 # XXX should we check for rename down to target node?
3472 if src and src not in names and repo.dirstate[src] == 'r':
3477 if src and src not in names and repo.dirstate[src] == 'r':
3473 dsremoved.add(src)
3478 dsremoved.add(src)
3474 names[src] = (repo.pathto(src, cwd), True)
3479 names[src] = (repo.pathto(src, cwd), True)
3475
3480
3476 # determine the exact nature of the deleted changesets
3481 # determine the exact nature of the deleted changesets
3477 deladded = set(_deleted)
3482 deladded = set(_deleted)
3478 for path in _deleted:
3483 for path in _deleted:
3479 if path in mf:
3484 if path in mf:
3480 deladded.remove(path)
3485 deladded.remove(path)
3481 deleted = _deleted - deladded
3486 deleted = _deleted - deladded
3482
3487
3483 # distinguish between file to forget and the other
3488 # distinguish between file to forget and the other
3484 added = set()
3489 added = set()
3485 for abs in dsadded:
3490 for abs in dsadded:
3486 if repo.dirstate[abs] != 'a':
3491 if repo.dirstate[abs] != 'a':
3487 added.add(abs)
3492 added.add(abs)
3488 dsadded -= added
3493 dsadded -= added
3489
3494
3490 for abs in deladded:
3495 for abs in deladded:
3491 if repo.dirstate[abs] == 'a':
3496 if repo.dirstate[abs] == 'a':
3492 dsadded.add(abs)
3497 dsadded.add(abs)
3493 deladded -= dsadded
3498 deladded -= dsadded
3494
3499
3495 # For files marked as removed, we check if an unknown file is present at
3500 # For files marked as removed, we check if an unknown file is present at
3496 # the same path. If a such file exists it may need to be backed up.
3501 # the same path. If a such file exists it may need to be backed up.
3497 # Making the distinction at this stage helps have simpler backup
3502 # Making the distinction at this stage helps have simpler backup
3498 # logic.
3503 # logic.
3499 removunk = set()
3504 removunk = set()
3500 for abs in removed:
3505 for abs in removed:
3501 target = repo.wjoin(abs)
3506 target = repo.wjoin(abs)
3502 if os.path.lexists(target):
3507 if os.path.lexists(target):
3503 removunk.add(abs)
3508 removunk.add(abs)
3504 removed -= removunk
3509 removed -= removunk
3505
3510
3506 dsremovunk = set()
3511 dsremovunk = set()
3507 for abs in dsremoved:
3512 for abs in dsremoved:
3508 target = repo.wjoin(abs)
3513 target = repo.wjoin(abs)
3509 if os.path.lexists(target):
3514 if os.path.lexists(target):
3510 dsremovunk.add(abs)
3515 dsremovunk.add(abs)
3511 dsremoved -= dsremovunk
3516 dsremoved -= dsremovunk
3512
3517
3513 # action to be actually performed by revert
3518 # action to be actually performed by revert
3514 # (<list of file>, message>) tuple
3519 # (<list of file>, message>) tuple
3515 actions = {'revert': ([], _('reverting %s\n')),
3520 actions = {'revert': ([], _('reverting %s\n')),
3516 'add': ([], _('adding %s\n')),
3521 'add': ([], _('adding %s\n')),
3517 'remove': ([], _('removing %s\n')),
3522 'remove': ([], _('removing %s\n')),
3518 'drop': ([], _('removing %s\n')),
3523 'drop': ([], _('removing %s\n')),
3519 'forget': ([], _('forgetting %s\n')),
3524 'forget': ([], _('forgetting %s\n')),
3520 'undelete': ([], _('undeleting %s\n')),
3525 'undelete': ([], _('undeleting %s\n')),
3521 'noop': (None, _('no changes needed to %s\n')),
3526 'noop': (None, _('no changes needed to %s\n')),
3522 'unknown': (None, _('file not managed: %s\n')),
3527 'unknown': (None, _('file not managed: %s\n')),
3523 }
3528 }
3524
3529
3525 # "constant" that convey the backup strategy.
3530 # "constant" that convey the backup strategy.
3526 # All set to `discard` if `no-backup` is set do avoid checking
3531 # All set to `discard` if `no-backup` is set do avoid checking
3527 # no_backup lower in the code.
3532 # no_backup lower in the code.
3528 # These values are ordered for comparison purposes
3533 # These values are ordered for comparison purposes
3529 backupinteractive = 3 # do backup if interactively modified
3534 backupinteractive = 3 # do backup if interactively modified
3530 backup = 2 # unconditionally do backup
3535 backup = 2 # unconditionally do backup
3531 check = 1 # check if the existing file differs from target
3536 check = 1 # check if the existing file differs from target
3532 discard = 0 # never do backup
3537 discard = 0 # never do backup
3533 if opts.get('no_backup'):
3538 if opts.get('no_backup'):
3534 backupinteractive = backup = check = discard
3539 backupinteractive = backup = check = discard
3535 if interactive:
3540 if interactive:
3536 dsmodifiedbackup = backupinteractive
3541 dsmodifiedbackup = backupinteractive
3537 else:
3542 else:
3538 dsmodifiedbackup = backup
3543 dsmodifiedbackup = backup
3539 tobackup = set()
3544 tobackup = set()
3540
3545
3541 backupanddel = actions['remove']
3546 backupanddel = actions['remove']
3542 if not opts.get('no_backup'):
3547 if not opts.get('no_backup'):
3543 backupanddel = actions['drop']
3548 backupanddel = actions['drop']
3544
3549
3545 disptable = (
3550 disptable = (
3546 # dispatch table:
3551 # dispatch table:
3547 # file state
3552 # file state
3548 # action
3553 # action
3549 # make backup
3554 # make backup
3550
3555
3551 ## Sets that results that will change file on disk
3556 ## Sets that results that will change file on disk
3552 # Modified compared to target, no local change
3557 # Modified compared to target, no local change
3553 (modified, actions['revert'], discard),
3558 (modified, actions['revert'], discard),
3554 # Modified compared to target, but local file is deleted
3559 # Modified compared to target, but local file is deleted
3555 (deleted, actions['revert'], discard),
3560 (deleted, actions['revert'], discard),
3556 # Modified compared to target, local change
3561 # Modified compared to target, local change
3557 (dsmodified, actions['revert'], dsmodifiedbackup),
3562 (dsmodified, actions['revert'], dsmodifiedbackup),
3558 # Added since target
3563 # Added since target
3559 (added, actions['remove'], discard),
3564 (added, actions['remove'], discard),
3560 # Added in working directory
3565 # Added in working directory
3561 (dsadded, actions['forget'], discard),
3566 (dsadded, actions['forget'], discard),
3562 # Added since target, have local modification
3567 # Added since target, have local modification
3563 (modadded, backupanddel, backup),
3568 (modadded, backupanddel, backup),
3564 # Added since target but file is missing in working directory
3569 # Added since target but file is missing in working directory
3565 (deladded, actions['drop'], discard),
3570 (deladded, actions['drop'], discard),
3566 # Removed since target, before working copy parent
3571 # Removed since target, before working copy parent
3567 (removed, actions['add'], discard),
3572 (removed, actions['add'], discard),
3568 # Same as `removed` but an unknown file exists at the same path
3573 # Same as `removed` but an unknown file exists at the same path
3569 (removunk, actions['add'], check),
3574 (removunk, actions['add'], check),
3570 # Removed since targe, marked as such in working copy parent
3575 # Removed since targe, marked as such in working copy parent
3571 (dsremoved, actions['undelete'], discard),
3576 (dsremoved, actions['undelete'], discard),
3572 # Same as `dsremoved` but an unknown file exists at the same path
3577 # Same as `dsremoved` but an unknown file exists at the same path
3573 (dsremovunk, actions['undelete'], check),
3578 (dsremovunk, actions['undelete'], check),
3574 ## the following sets does not result in any file changes
3579 ## the following sets does not result in any file changes
3575 # File with no modification
3580 # File with no modification
3576 (clean, actions['noop'], discard),
3581 (clean, actions['noop'], discard),
3577 # Existing file, not tracked anywhere
3582 # Existing file, not tracked anywhere
3578 (unknown, actions['unknown'], discard),
3583 (unknown, actions['unknown'], discard),
3579 )
3584 )
3580
3585
3581 for abs, (rel, exact) in sorted(names.items()):
3586 for abs, (rel, exact) in sorted(names.items()):
3582 # target file to be touch on disk (relative to cwd)
3587 # target file to be touch on disk (relative to cwd)
3583 target = repo.wjoin(abs)
3588 target = repo.wjoin(abs)
3584 # search the entry in the dispatch table.
3589 # search the entry in the dispatch table.
3585 # if the file is in any of these sets, it was touched in the working
3590 # if the file is in any of these sets, it was touched in the working
3586 # directory parent and we are sure it needs to be reverted.
3591 # directory parent and we are sure it needs to be reverted.
3587 for table, (xlist, msg), dobackup in disptable:
3592 for table, (xlist, msg), dobackup in disptable:
3588 if abs not in table:
3593 if abs not in table:
3589 continue
3594 continue
3590 if xlist is not None:
3595 if xlist is not None:
3591 xlist.append(abs)
3596 xlist.append(abs)
3592 if dobackup:
3597 if dobackup:
3593 # If in interactive mode, don't automatically create
3598 # If in interactive mode, don't automatically create
3594 # .orig files (issue4793)
3599 # .orig files (issue4793)
3595 if dobackup == backupinteractive:
3600 if dobackup == backupinteractive:
3596 tobackup.add(abs)
3601 tobackup.add(abs)
3597 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3602 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3598 bakname = scmutil.origpath(ui, repo, rel)
3603 bakname = scmutil.origpath(ui, repo, rel)
3599 ui.note(_('saving current version of %s as %s\n') %
3604 ui.note(_('saving current version of %s as %s\n') %
3600 (rel, bakname))
3605 (rel, bakname))
3601 if not opts.get('dry_run'):
3606 if not opts.get('dry_run'):
3602 if interactive:
3607 if interactive:
3603 util.copyfile(target, bakname)
3608 util.copyfile(target, bakname)
3604 else:
3609 else:
3605 util.rename(target, bakname)
3610 util.rename(target, bakname)
3606 if ui.verbose or not exact:
3611 if ui.verbose or not exact:
3607 if not isinstance(msg, basestring):
3612 if not isinstance(msg, basestring):
3608 msg = msg(abs)
3613 msg = msg(abs)
3609 ui.status(msg % rel)
3614 ui.status(msg % rel)
3610 elif exact:
3615 elif exact:
3611 ui.warn(msg % rel)
3616 ui.warn(msg % rel)
3612 break
3617 break
3613
3618
3614 if not opts.get('dry_run'):
3619 if not opts.get('dry_run'):
3615 needdata = ('revert', 'add', 'undelete')
3620 needdata = ('revert', 'add', 'undelete')
3616 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3621 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3617 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3622 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3618
3623
3619 if targetsubs:
3624 if targetsubs:
3620 # Revert the subrepos on the revert list
3625 # Revert the subrepos on the revert list
3621 for sub in targetsubs:
3626 for sub in targetsubs:
3622 try:
3627 try:
3623 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3628 wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
3624 except KeyError:
3629 except KeyError:
3625 raise error.Abort("subrepository '%s' does not exist in %s!"
3630 raise error.Abort("subrepository '%s' does not exist in %s!"
3626 % (sub, short(ctx.node())))
3631 % (sub, short(ctx.node())))
3627
3632
3628 def _revertprefetch(repo, ctx, *files):
3633 def _revertprefetch(repo, ctx, *files):
3629 """Let extension changing the storage layer prefetch content"""
3634 """Let extension changing the storage layer prefetch content"""
3630
3635
def _performrevert(repo, parents, ctx, actions, interactive=False,
                   tobackup=None):
    """Actually perform all the actions computed for revert.

    This is an independent function to let extensions plug in and react to
    the imminent revert.

    ``parents`` is the (parent, p2) pair of the working directory; ``ctx``
    is the target changeset being reverted to; ``actions`` maps action
    names ('revert', 'add', 'remove', 'drop', 'forget', 'undelete', ...)
    to ([files], message) pairs; ``tobackup`` is the set of files that
    should get an .orig backup in interactive mode.

    Make sure you have the working directory locked when calling this
    function.
    """
    parent, p2 = parents
    node = ctx.node()
    # files the user declines to touch in interactive mode; shared (by
    # reference) with matcher_opts so later matchers exclude them
    excluded_files = []
    matcher_opts = {"exclude": excluded_files}

    def checkout(f):
        # write f's content (and flags) from the target ctx into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        # best-effort unlink from disk, then mark removed in the dirstate
        try:
            repo.wvfs.unlinkpath(f)
        except OSError:
            pass
        repo.dirstate.remove(f)

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    # forget: files added in the dirstate that should stop being tracked
    for f in actions['forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                repo.dirstate.drop(f)
            else:
                # declined: keep tracked and exclude from the patch below
                excluded_files.append(repo.wjoin(f))
        else:
            repo.dirstate.drop(f)
    # remove: files to delete from disk as well as from tracking
    for f in actions['remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
            if choice == 0:
                doremove(f)
            else:
                excluded_files.append(repo.wjoin(f))
        else:
            doremove(f)
    # drop: stop tracking but leave the file on disk alone
    for f in actions['drop'][0]:
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, matcher_opts)
        diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        operation = 'discard'
        reversehunks = True
        if node != parent:
            operation = 'revert'
            reversehunks = repo.ui.configbool('experimental',
                'revertalternateinteractivemode')
        # diff direction determines whether selected hunks must be
        # reversed before being applied to the working directory
        if reversehunks:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        else:
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(repo.ui, originalchunks,
                                        operation=operation)
            if reversehunks:
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)

        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        for c in chunks:
            # Create a backup file only if this hunk should be backed up
            if ishunk(c) and c.header.filename() in tobackup:
                abs = c.header.filename()
                target = repo.wjoin(abs)
                bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
                util.copyfile(target, bakname)
                # only one backup per file even if it has several hunks
                tobackup.remove(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(str(err))
        del fp
    else:
        # non-interactive: check the full target content out directly
        for f in actions['revert'][0]:
            checkout(f)
            if normal:
                normal(f)

    for f in actions['add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            checkout(f)
        repo.dirstate.add(f)

    # undeleted files use normallookup unless we revert to the wdir parent
    # of a non-merge state
    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # restore copy/rename metadata for every file we (re)materialized
    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3768
3773
class command(registrar.command):
    """registrar.command subclass that tags registrations as deprecated.

    The ``_deprecatedregistrar`` attribute set here is consumed by
    deprecwarn in extensions.py.
    """
    def _doregister(self, func, name, *args, **kwargs):
        # flag for deprecwarn in extensions.py
        setattr(func, '_deprecatedregistrar', True)
        return super(command, self)._doregister(func, name, *args, **kwargs)
3773
3778
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished()/clearunfinished() below
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3802
3807
def checkunfinished(repo, commit=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    Walks the unfinishedstates table and raises error.Abort for the first
    state file present in the repo; entries whose allowcommit flag is set
    are skipped when ``commit`` is True.  It's probably good to check this
    right before bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        skippable = commit and allowcommit
        if not skippable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
3813
3818
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the clearable
    ones.

    Aborts on the first non-clearable state file found; only then removes
    every clearable state file that exists.
    '''
    states = [(f, clearable, msg, hint)
              for f, clearable, allowcommit, msg, hint in unfinishedstates]
    # first pass: refuse to proceed if a non-clearable state is present
    for statefile, clearable, msg, hint in states:
        if not clearable and repo.vfs.exists(statefile):
            raise error.Abort(msg, hint=hint)
    # second pass: drop the clearable state files
    for statefile, clearable, msg, hint in states:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.vfs.join(statefile))
3824
3829
# (state file, command to finish the operation) pairs consumed by
# howtocontinue() to suggest the next step after 'hg resolve'
afterresolvedstates = [
    ('graftstate',
     _('hg graft --continue')),
    ]
3829
3834
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    afterresolvedstates tuples define a .hg/{file} and the corresponding
    command needed to finish it.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean; (None, None) when there is nothing to continue.
    '''
    template = _("continue: %s")
    pending = next((cmd for statefile, cmd in afterresolvedstates
                    if repo.vfs.exists(statefile)), None)
    if pending is not None:
        return template % pending, True
    wctx = repo[None]
    if wctx.dirty(missing=True, merge=False, branch=False):
        # no state file, but uncommitted changes: suggest committing
        return template % _("hg commit"), False
    return None, None
3847
3852
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve.

    If there's a matching afterresolvedstates entry, the message goes to
    repo.ui.warn; otherwise it goes to repo.ui.note.  Emits nothing when
    howtocontinue() finds nothing to continue.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter("%s\n" % msg)
3862
3867
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    # only attach a hint for real unfinished states (warning is True)
    hint = msg if warning else None
    raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,2798 +1,2805 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import email
13 import email
14 import errno
14 import errno
15 import hashlib
15 import hashlib
16 import os
16 import os
17 import posixpath
17 import posixpath
18 import re
18 import re
19 import shutil
19 import shutil
20 import tempfile
20 import tempfile
21 import zlib
21 import zlib
22
22
23 from .i18n import _
23 from .i18n import _
24 from .node import (
24 from .node import (
25 hex,
25 hex,
26 short,
26 short,
27 )
27 )
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 mail,
32 mail,
33 mdiff,
33 mdiff,
34 pathutil,
34 pathutil,
35 policy,
35 policy,
36 pycompat,
36 pycompat,
37 scmutil,
37 scmutil,
38 similar,
38 similar,
39 util,
39 util,
40 vfs as vfsmod,
40 vfs as vfsmod,
41 )
41 )
42
42
43 diffhelpers = policy.importmod(r'diffhelpers')
43 diffhelpers = policy.importmod(r'diffhelpers')
44 stringio = util.stringio
44 stringio = util.stringio
45
45
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
48
49 PatchError = error.PatchError
49 PatchError = error.PatchError
50
50
51 # public functions
51 # public functions
52
52
53 def split(stream):
53 def split(stream):
54 '''return an iterator of individual patches from a stream'''
54 '''return an iterator of individual patches from a stream'''
55 def isheader(line, inheader):
55 def isheader(line, inheader):
56 if inheader and line[0] in (' ', '\t'):
56 if inheader and line[0] in (' ', '\t'):
57 # continuation
57 # continuation
58 return True
58 return True
59 if line[0] in (' ', '-', '+'):
59 if line[0] in (' ', '-', '+'):
60 # diff line - don't check for header pattern in there
60 # diff line - don't check for header pattern in there
61 return False
61 return False
62 l = line.split(': ', 1)
62 l = line.split(': ', 1)
63 return len(l) == 2 and ' ' not in l[0]
63 return len(l) == 2 and ' ' not in l[0]
64
64
65 def chunk(lines):
65 def chunk(lines):
66 return stringio(''.join(lines))
66 return stringio(''.join(lines))
67
67
68 def hgsplit(stream, cur):
68 def hgsplit(stream, cur):
69 inheader = True
69 inheader = True
70
70
71 for line in stream:
71 for line in stream:
72 if not line.strip():
72 if not line.strip():
73 inheader = False
73 inheader = False
74 if not inheader and line.startswith('# HG changeset patch'):
74 if not inheader and line.startswith('# HG changeset patch'):
75 yield chunk(cur)
75 yield chunk(cur)
76 cur = []
76 cur = []
77 inheader = True
77 inheader = True
78
78
79 cur.append(line)
79 cur.append(line)
80
80
81 if cur:
81 if cur:
82 yield chunk(cur)
82 yield chunk(cur)
83
83
84 def mboxsplit(stream, cur):
84 def mboxsplit(stream, cur):
85 for line in stream:
85 for line in stream:
86 if line.startswith('From '):
86 if line.startswith('From '):
87 for c in split(chunk(cur[1:])):
87 for c in split(chunk(cur[1:])):
88 yield c
88 yield c
89 cur = []
89 cur = []
90
90
91 cur.append(line)
91 cur.append(line)
92
92
93 if cur:
93 if cur:
94 for c in split(chunk(cur[1:])):
94 for c in split(chunk(cur[1:])):
95 yield c
95 yield c
96
96
97 def mimesplit(stream, cur):
97 def mimesplit(stream, cur):
98 def msgfp(m):
98 def msgfp(m):
99 fp = stringio()
99 fp = stringio()
100 g = email.Generator.Generator(fp, mangle_from_=False)
100 g = email.Generator.Generator(fp, mangle_from_=False)
101 g.flatten(m)
101 g.flatten(m)
102 fp.seek(0)
102 fp.seek(0)
103 return fp
103 return fp
104
104
105 for line in stream:
105 for line in stream:
106 cur.append(line)
106 cur.append(line)
107 c = chunk(cur)
107 c = chunk(cur)
108
108
109 m = email.Parser.Parser().parse(c)
109 m = email.Parser.Parser().parse(c)
110 if not m.is_multipart():
110 if not m.is_multipart():
111 yield msgfp(m)
111 yield msgfp(m)
112 else:
112 else:
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 for part in m.walk():
114 for part in m.walk():
115 ct = part.get_content_type()
115 ct = part.get_content_type()
116 if ct not in ok_types:
116 if ct not in ok_types:
117 continue
117 continue
118 yield msgfp(part)
118 yield msgfp(part)
119
119
120 def headersplit(stream, cur):
120 def headersplit(stream, cur):
121 inheader = False
121 inheader = False
122
122
123 for line in stream:
123 for line in stream:
124 if not inheader and isheader(line, inheader):
124 if not inheader and isheader(line, inheader):
125 yield chunk(cur)
125 yield chunk(cur)
126 cur = []
126 cur = []
127 inheader = True
127 inheader = True
128 if inheader and not isheader(line, inheader):
128 if inheader and not isheader(line, inheader):
129 inheader = False
129 inheader = False
130
130
131 cur.append(line)
131 cur.append(line)
132
132
133 if cur:
133 if cur:
134 yield chunk(cur)
134 yield chunk(cur)
135
135
136 def remainder(cur):
136 def remainder(cur):
137 yield chunk(cur)
137 yield chunk(cur)
138
138
139 class fiter(object):
139 class fiter(object):
140 def __init__(self, fp):
140 def __init__(self, fp):
141 self.fp = fp
141 self.fp = fp
142
142
143 def __iter__(self):
143 def __iter__(self):
144 return self
144 return self
145
145
146 def next(self):
146 def next(self):
147 l = self.fp.readline()
147 l = self.fp.readline()
148 if not l:
148 if not l:
149 raise StopIteration
149 raise StopIteration
150 return l
150 return l
151
151
152 inheader = False
152 inheader = False
153 cur = []
153 cur = []
154
154
155 mimeheaders = ['content-type']
155 mimeheaders = ['content-type']
156
156
157 if not util.safehasattr(stream, 'next'):
157 if not util.safehasattr(stream, 'next'):
158 # http responses, for example, have readline but not next
158 # http responses, for example, have readline but not next
159 stream = fiter(stream)
159 stream = fiter(stream)
160
160
161 for line in stream:
161 for line in stream:
162 cur.append(line)
162 cur.append(line)
163 if line.startswith('# HG changeset patch'):
163 if line.startswith('# HG changeset patch'):
164 return hgsplit(stream, cur)
164 return hgsplit(stream, cur)
165 elif line.startswith('From '):
165 elif line.startswith('From '):
166 return mboxsplit(stream, cur)
166 return mboxsplit(stream, cur)
167 elif isheader(line, inheader):
167 elif isheader(line, inheader):
168 inheader = True
168 inheader = True
169 if line.split(':', 1)[0].lower() in mimeheaders:
169 if line.split(':', 1)[0].lower() in mimeheaders:
170 # let email parser handle this
170 # let email parser handle this
171 return mimesplit(stream, cur)
171 return mimesplit(stream, cur)
172 elif line.startswith('--- ') and inheader:
172 elif line.startswith('--- ') and inheader:
173 # No evil headers seen by diff start, split by hand
173 # No evil headers seen by diff start, split by hand
174 return headersplit(stream, cur)
174 return headersplit(stream, cur)
175 # Not enough info, keep reading
175 # Not enough info, keep reading
176
176
177 # if we are here, we have a very plain patch
177 # if we are here, we have a very plain patch
178 return remainder(cur)
178 return remainder(cur)
179
179
180 ## Some facility for extensible patch parsing:
180 ## Some facility for extensible patch parsing:
181 # list of pairs ("header to match", "data key")
181 # list of pairs ("header to match", "data key")
182 patchheadermap = [('Date', 'date'),
182 patchheadermap = [('Date', 'date'),
183 ('Branch', 'branch'),
183 ('Branch', 'branch'),
184 ('Node ID', 'nodeid'),
184 ('Node ID', 'nodeid'),
185 ]
185 ]
186
186
187 def extract(ui, fileobj):
187 def extract(ui, fileobj):
188 '''extract patch from data read from fileobj.
188 '''extract patch from data read from fileobj.
189
189
190 patch can be a normal patch or contained in an email message.
190 patch can be a normal patch or contained in an email message.
191
191
192 return a dictionary. Standard keys are:
192 return a dictionary. Standard keys are:
193 - filename,
193 - filename,
194 - message,
194 - message,
195 - user,
195 - user,
196 - date,
196 - date,
197 - branch,
197 - branch,
198 - node,
198 - node,
199 - p1,
199 - p1,
200 - p2.
200 - p2.
201 Any item can be missing from the dictionary. If filename is missing,
201 Any item can be missing from the dictionary. If filename is missing,
202 fileobj did not contain a patch. Caller must unlink filename when done.'''
202 fileobj did not contain a patch. Caller must unlink filename when done.'''
203
203
204 # attempt to detect the start of a patch
204 # attempt to detect the start of a patch
205 # (this heuristic is borrowed from quilt)
205 # (this heuristic is borrowed from quilt)
206 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
206 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]|RCS file: |'
207 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
207 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
208 br'---[ \t].*?^\+\+\+[ \t]|'
208 br'---[ \t].*?^\+\+\+[ \t]|'
209 br'\*\*\*[ \t].*?^---[ \t])',
209 br'\*\*\*[ \t].*?^---[ \t])',
210 re.MULTILINE | re.DOTALL)
210 re.MULTILINE | re.DOTALL)
211
211
212 data = {}
212 data = {}
213 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
213 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
214 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
214 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
215 try:
215 try:
216 msg = email.Parser.Parser().parse(fileobj)
216 msg = email.Parser.Parser().parse(fileobj)
217
217
218 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
218 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
219 data['user'] = msg['From'] and mail.headdecode(msg['From'])
219 data['user'] = msg['From'] and mail.headdecode(msg['From'])
220 if not subject and not data['user']:
220 if not subject and not data['user']:
221 # Not an email, restore parsed headers if any
221 # Not an email, restore parsed headers if any
222 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
222 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
223
223
224 # should try to parse msg['Date']
224 # should try to parse msg['Date']
225 parents = []
225 parents = []
226
226
227 if subject:
227 if subject:
228 if subject.startswith('[PATCH'):
228 if subject.startswith('[PATCH'):
229 pend = subject.find(']')
229 pend = subject.find(']')
230 if pend >= 0:
230 if pend >= 0:
231 subject = subject[pend + 1:].lstrip()
231 subject = subject[pend + 1:].lstrip()
232 subject = re.sub(br'\n[ \t]+', ' ', subject)
232 subject = re.sub(br'\n[ \t]+', ' ', subject)
233 ui.debug('Subject: %s\n' % subject)
233 ui.debug('Subject: %s\n' % subject)
234 if data['user']:
234 if data['user']:
235 ui.debug('From: %s\n' % data['user'])
235 ui.debug('From: %s\n' % data['user'])
236 diffs_seen = 0
236 diffs_seen = 0
237 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
237 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
238 message = ''
238 message = ''
239 for part in msg.walk():
239 for part in msg.walk():
240 content_type = part.get_content_type()
240 content_type = part.get_content_type()
241 ui.debug('Content-Type: %s\n' % content_type)
241 ui.debug('Content-Type: %s\n' % content_type)
242 if content_type not in ok_types:
242 if content_type not in ok_types:
243 continue
243 continue
244 payload = part.get_payload(decode=True)
244 payload = part.get_payload(decode=True)
245 m = diffre.search(payload)
245 m = diffre.search(payload)
246 if m:
246 if m:
247 hgpatch = False
247 hgpatch = False
248 hgpatchheader = False
248 hgpatchheader = False
249 ignoretext = False
249 ignoretext = False
250
250
251 ui.debug('found patch at byte %d\n' % m.start(0))
251 ui.debug('found patch at byte %d\n' % m.start(0))
252 diffs_seen += 1
252 diffs_seen += 1
253 cfp = stringio()
253 cfp = stringio()
254 for line in payload[:m.start(0)].splitlines():
254 for line in payload[:m.start(0)].splitlines():
255 if line.startswith('# HG changeset patch') and not hgpatch:
255 if line.startswith('# HG changeset patch') and not hgpatch:
256 ui.debug('patch generated by hg export\n')
256 ui.debug('patch generated by hg export\n')
257 hgpatch = True
257 hgpatch = True
258 hgpatchheader = True
258 hgpatchheader = True
259 # drop earlier commit message content
259 # drop earlier commit message content
260 cfp.seek(0)
260 cfp.seek(0)
261 cfp.truncate()
261 cfp.truncate()
262 subject = None
262 subject = None
263 elif hgpatchheader:
263 elif hgpatchheader:
264 if line.startswith('# User '):
264 if line.startswith('# User '):
265 data['user'] = line[7:]
265 data['user'] = line[7:]
266 ui.debug('From: %s\n' % data['user'])
266 ui.debug('From: %s\n' % data['user'])
267 elif line.startswith("# Parent "):
267 elif line.startswith("# Parent "):
268 parents.append(line[9:].lstrip())
268 parents.append(line[9:].lstrip())
269 elif line.startswith("# "):
269 elif line.startswith("# "):
270 for header, key in patchheadermap:
270 for header, key in patchheadermap:
271 prefix = '# %s ' % header
271 prefix = '# %s ' % header
272 if line.startswith(prefix):
272 if line.startswith(prefix):
273 data[key] = line[len(prefix):]
273 data[key] = line[len(prefix):]
274 else:
274 else:
275 hgpatchheader = False
275 hgpatchheader = False
276 elif line == '---':
276 elif line == '---':
277 ignoretext = True
277 ignoretext = True
278 if not hgpatchheader and not ignoretext:
278 if not hgpatchheader and not ignoretext:
279 cfp.write(line)
279 cfp.write(line)
280 cfp.write('\n')
280 cfp.write('\n')
281 message = cfp.getvalue()
281 message = cfp.getvalue()
282 if tmpfp:
282 if tmpfp:
283 tmpfp.write(payload)
283 tmpfp.write(payload)
284 if not payload.endswith('\n'):
284 if not payload.endswith('\n'):
285 tmpfp.write('\n')
285 tmpfp.write('\n')
286 elif not diffs_seen and message and content_type == 'text/plain':
286 elif not diffs_seen and message and content_type == 'text/plain':
287 message += '\n' + payload
287 message += '\n' + payload
288 except: # re-raises
288 except: # re-raises
289 tmpfp.close()
289 tmpfp.close()
290 os.unlink(tmpname)
290 os.unlink(tmpname)
291 raise
291 raise
292
292
293 if subject and not message.startswith(subject):
293 if subject and not message.startswith(subject):
294 message = '%s\n%s' % (subject, message)
294 message = '%s\n%s' % (subject, message)
295 data['message'] = message
295 data['message'] = message
296 tmpfp.close()
296 tmpfp.close()
297 if parents:
297 if parents:
298 data['p1'] = parents.pop(0)
298 data['p1'] = parents.pop(0)
299 if parents:
299 if parents:
300 data['p2'] = parents.pop(0)
300 data['p2'] = parents.pop(0)
301
301
302 if diffs_seen:
302 if diffs_seen:
303 data['filename'] = tmpname
303 data['filename'] = tmpname
304 else:
304 else:
305 os.unlink(tmpname)
305 os.unlink(tmpname)
306 return data
306 return data
307
307
308 class patchmeta(object):
308 class patchmeta(object):
309 """Patched file metadata
309 """Patched file metadata
310
310
311 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
311 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
312 or COPY. 'path' is patched file path. 'oldpath' is set to the
312 or COPY. 'path' is patched file path. 'oldpath' is set to the
313 origin file when 'op' is either COPY or RENAME, None otherwise. If
313 origin file when 'op' is either COPY or RENAME, None otherwise. If
314 file mode is changed, 'mode' is a tuple (islink, isexec) where
314 file mode is changed, 'mode' is a tuple (islink, isexec) where
315 'islink' is True if the file is a symlink and 'isexec' is True if
315 'islink' is True if the file is a symlink and 'isexec' is True if
316 the file is executable. Otherwise, 'mode' is None.
316 the file is executable. Otherwise, 'mode' is None.
317 """
317 """
318 def __init__(self, path):
318 def __init__(self, path):
319 self.path = path
319 self.path = path
320 self.oldpath = None
320 self.oldpath = None
321 self.mode = None
321 self.mode = None
322 self.op = 'MODIFY'
322 self.op = 'MODIFY'
323 self.binary = False
323 self.binary = False
324
324
325 def setmode(self, mode):
325 def setmode(self, mode):
326 islink = mode & 0o20000
326 islink = mode & 0o20000
327 isexec = mode & 0o100
327 isexec = mode & 0o100
328 self.mode = (islink, isexec)
328 self.mode = (islink, isexec)
329
329
330 def copy(self):
330 def copy(self):
331 other = patchmeta(self.path)
331 other = patchmeta(self.path)
332 other.oldpath = self.oldpath
332 other.oldpath = self.oldpath
333 other.mode = self.mode
333 other.mode = self.mode
334 other.op = self.op
334 other.op = self.op
335 other.binary = self.binary
335 other.binary = self.binary
336 return other
336 return other
337
337
338 def _ispatchinga(self, afile):
338 def _ispatchinga(self, afile):
339 if afile == '/dev/null':
339 if afile == '/dev/null':
340 return self.op == 'ADD'
340 return self.op == 'ADD'
341 return afile == 'a/' + (self.oldpath or self.path)
341 return afile == 'a/' + (self.oldpath or self.path)
342
342
343 def _ispatchingb(self, bfile):
343 def _ispatchingb(self, bfile):
344 if bfile == '/dev/null':
344 if bfile == '/dev/null':
345 return self.op == 'DELETE'
345 return self.op == 'DELETE'
346 return bfile == 'b/' + self.path
346 return bfile == 'b/' + self.path
347
347
348 def ispatching(self, afile, bfile):
348 def ispatching(self, afile, bfile):
349 return self._ispatchinga(afile) and self._ispatchingb(bfile)
349 return self._ispatchinga(afile) and self._ispatchingb(bfile)
350
350
351 def __repr__(self):
351 def __repr__(self):
352 return "<patchmeta %s %r>" % (self.op, self.path)
352 return "<patchmeta %s %r>" % (self.op, self.path)
353
353
354 def readgitpatch(lr):
354 def readgitpatch(lr):
355 """extract git-style metadata about patches from <patchname>"""
355 """extract git-style metadata about patches from <patchname>"""
356
356
357 # Filter patch for git information
357 # Filter patch for git information
358 gp = None
358 gp = None
359 gitpatches = []
359 gitpatches = []
360 for line in lr:
360 for line in lr:
361 line = line.rstrip(' \r\n')
361 line = line.rstrip(' \r\n')
362 if line.startswith('diff --git a/'):
362 if line.startswith('diff --git a/'):
363 m = gitre.match(line)
363 m = gitre.match(line)
364 if m:
364 if m:
365 if gp:
365 if gp:
366 gitpatches.append(gp)
366 gitpatches.append(gp)
367 dst = m.group(2)
367 dst = m.group(2)
368 gp = patchmeta(dst)
368 gp = patchmeta(dst)
369 elif gp:
369 elif gp:
370 if line.startswith('--- '):
370 if line.startswith('--- '):
371 gitpatches.append(gp)
371 gitpatches.append(gp)
372 gp = None
372 gp = None
373 continue
373 continue
374 if line.startswith('rename from '):
374 if line.startswith('rename from '):
375 gp.op = 'RENAME'
375 gp.op = 'RENAME'
376 gp.oldpath = line[12:]
376 gp.oldpath = line[12:]
377 elif line.startswith('rename to '):
377 elif line.startswith('rename to '):
378 gp.path = line[10:]
378 gp.path = line[10:]
379 elif line.startswith('copy from '):
379 elif line.startswith('copy from '):
380 gp.op = 'COPY'
380 gp.op = 'COPY'
381 gp.oldpath = line[10:]
381 gp.oldpath = line[10:]
382 elif line.startswith('copy to '):
382 elif line.startswith('copy to '):
383 gp.path = line[8:]
383 gp.path = line[8:]
384 elif line.startswith('deleted file'):
384 elif line.startswith('deleted file'):
385 gp.op = 'DELETE'
385 gp.op = 'DELETE'
386 elif line.startswith('new file mode '):
386 elif line.startswith('new file mode '):
387 gp.op = 'ADD'
387 gp.op = 'ADD'
388 gp.setmode(int(line[-6:], 8))
388 gp.setmode(int(line[-6:], 8))
389 elif line.startswith('new mode '):
389 elif line.startswith('new mode '):
390 gp.setmode(int(line[-6:], 8))
390 gp.setmode(int(line[-6:], 8))
391 elif line.startswith('GIT binary patch'):
391 elif line.startswith('GIT binary patch'):
392 gp.binary = True
392 gp.binary = True
393 if gp:
393 if gp:
394 gitpatches.append(gp)
394 gitpatches.append(gp)
395
395
396 return gitpatches
396 return gitpatches
397
397
398 class linereader(object):
398 class linereader(object):
399 # simple class to allow pushing lines back into the input stream
399 # simple class to allow pushing lines back into the input stream
400 def __init__(self, fp):
400 def __init__(self, fp):
401 self.fp = fp
401 self.fp = fp
402 self.buf = []
402 self.buf = []
403
403
404 def push(self, line):
404 def push(self, line):
405 if line is not None:
405 if line is not None:
406 self.buf.append(line)
406 self.buf.append(line)
407
407
408 def readline(self):
408 def readline(self):
409 if self.buf:
409 if self.buf:
410 l = self.buf[0]
410 l = self.buf[0]
411 del self.buf[0]
411 del self.buf[0]
412 return l
412 return l
413 return self.fp.readline()
413 return self.fp.readline()
414
414
415 def __iter__(self):
415 def __iter__(self):
416 return iter(self.readline, '')
416 return iter(self.readline, '')
417
417
418 class abstractbackend(object):
418 class abstractbackend(object):
419 def __init__(self, ui):
419 def __init__(self, ui):
420 self.ui = ui
420 self.ui = ui
421
421
422 def getfile(self, fname):
422 def getfile(self, fname):
423 """Return target file data and flags as a (data, (islink,
423 """Return target file data and flags as a (data, (islink,
424 isexec)) tuple. Data is None if file is missing/deleted.
424 isexec)) tuple. Data is None if file is missing/deleted.
425 """
425 """
426 raise NotImplementedError
426 raise NotImplementedError
427
427
428 def setfile(self, fname, data, mode, copysource):
428 def setfile(self, fname, data, mode, copysource):
429 """Write data to target file fname and set its mode. mode is a
429 """Write data to target file fname and set its mode. mode is a
430 (islink, isexec) tuple. If data is None, the file content should
430 (islink, isexec) tuple. If data is None, the file content should
431 be left unchanged. If the file is modified after being copied,
431 be left unchanged. If the file is modified after being copied,
432 copysource is set to the original file name.
432 copysource is set to the original file name.
433 """
433 """
434 raise NotImplementedError
434 raise NotImplementedError
435
435
436 def unlink(self, fname):
436 def unlink(self, fname):
437 """Unlink target file."""
437 """Unlink target file."""
438 raise NotImplementedError
438 raise NotImplementedError
439
439
440 def writerej(self, fname, failed, total, lines):
440 def writerej(self, fname, failed, total, lines):
441 """Write rejected lines for fname. total is the number of hunks
441 """Write rejected lines for fname. total is the number of hunks
442 which failed to apply and total the total number of hunks for this
442 which failed to apply and total the total number of hunks for this
443 files.
443 files.
444 """
444 """
445
445
446 def exists(self, fname):
446 def exists(self, fname):
447 raise NotImplementedError
447 raise NotImplementedError
448
448
449 def close(self):
449 def close(self):
450 raise NotImplementedError
450 raise NotImplementedError
451
451
452 class fsbackend(abstractbackend):
452 class fsbackend(abstractbackend):
453 def __init__(self, ui, basedir):
453 def __init__(self, ui, basedir):
454 super(fsbackend, self).__init__(ui)
454 super(fsbackend, self).__init__(ui)
455 self.opener = vfsmod.vfs(basedir)
455 self.opener = vfsmod.vfs(basedir)
456
456
457 def getfile(self, fname):
457 def getfile(self, fname):
458 if self.opener.islink(fname):
458 if self.opener.islink(fname):
459 return (self.opener.readlink(fname), (True, False))
459 return (self.opener.readlink(fname), (True, False))
460
460
461 isexec = False
461 isexec = False
462 try:
462 try:
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
463 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
464 except OSError as e:
464 except OSError as e:
465 if e.errno != errno.ENOENT:
465 if e.errno != errno.ENOENT:
466 raise
466 raise
467 try:
467 try:
468 return (self.opener.read(fname), (False, isexec))
468 return (self.opener.read(fname), (False, isexec))
469 except IOError as e:
469 except IOError as e:
470 if e.errno != errno.ENOENT:
470 if e.errno != errno.ENOENT:
471 raise
471 raise
472 return None, None
472 return None, None
473
473
474 def setfile(self, fname, data, mode, copysource):
474 def setfile(self, fname, data, mode, copysource):
475 islink, isexec = mode
475 islink, isexec = mode
476 if data is None:
476 if data is None:
477 self.opener.setflags(fname, islink, isexec)
477 self.opener.setflags(fname, islink, isexec)
478 return
478 return
479 if islink:
479 if islink:
480 self.opener.symlink(data, fname)
480 self.opener.symlink(data, fname)
481 else:
481 else:
482 self.opener.write(fname, data)
482 self.opener.write(fname, data)
483 if isexec:
483 if isexec:
484 self.opener.setflags(fname, False, True)
484 self.opener.setflags(fname, False, True)
485
485
486 def unlink(self, fname):
486 def unlink(self, fname):
487 self.opener.unlinkpath(fname, ignoremissing=True)
487 self.opener.unlinkpath(fname, ignoremissing=True)
488
488
489 def writerej(self, fname, failed, total, lines):
489 def writerej(self, fname, failed, total, lines):
490 fname = fname + ".rej"
490 fname = fname + ".rej"
491 self.ui.warn(
491 self.ui.warn(
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
492 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
493 (failed, total, fname))
493 (failed, total, fname))
494 fp = self.opener(fname, 'w')
494 fp = self.opener(fname, 'w')
495 fp.writelines(lines)
495 fp.writelines(lines)
496 fp.close()
496 fp.close()
497
497
498 def exists(self, fname):
498 def exists(self, fname):
499 return self.opener.lexists(fname)
499 return self.opener.lexists(fname)
500
500
501 class workingbackend(fsbackend):
501 class workingbackend(fsbackend):
502 def __init__(self, ui, repo, similarity):
502 def __init__(self, ui, repo, similarity):
503 super(workingbackend, self).__init__(ui, repo.root)
503 super(workingbackend, self).__init__(ui, repo.root)
504 self.repo = repo
504 self.repo = repo
505 self.similarity = similarity
505 self.similarity = similarity
506 self.removed = set()
506 self.removed = set()
507 self.changed = set()
507 self.changed = set()
508 self.copied = []
508 self.copied = []
509
509
510 def _checkknown(self, fname):
510 def _checkknown(self, fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
511 if self.repo.dirstate[fname] == '?' and self.exists(fname):
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
512 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
513
513
514 def setfile(self, fname, data, mode, copysource):
514 def setfile(self, fname, data, mode, copysource):
515 self._checkknown(fname)
515 self._checkknown(fname)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
516 super(workingbackend, self).setfile(fname, data, mode, copysource)
517 if copysource is not None:
517 if copysource is not None:
518 self.copied.append((copysource, fname))
518 self.copied.append((copysource, fname))
519 self.changed.add(fname)
519 self.changed.add(fname)
520
520
521 def unlink(self, fname):
521 def unlink(self, fname):
522 self._checkknown(fname)
522 self._checkknown(fname)
523 super(workingbackend, self).unlink(fname)
523 super(workingbackend, self).unlink(fname)
524 self.removed.add(fname)
524 self.removed.add(fname)
525 self.changed.add(fname)
525 self.changed.add(fname)
526
526
527 def close(self):
527 def close(self):
528 wctx = self.repo[None]
528 wctx = self.repo[None]
529 changed = set(self.changed)
529 changed = set(self.changed)
530 for src, dst in self.copied:
530 for src, dst in self.copied:
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
531 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
532 if self.removed:
532 if self.removed:
533 wctx.forget(sorted(self.removed))
533 wctx.forget(sorted(self.removed))
534 for f in self.removed:
534 for f in self.removed:
535 if f not in self.repo.dirstate:
535 if f not in self.repo.dirstate:
536 # File was deleted and no longer belongs to the
536 # File was deleted and no longer belongs to the
537 # dirstate, it was probably marked added then
537 # dirstate, it was probably marked added then
538 # deleted, and should not be considered by
538 # deleted, and should not be considered by
539 # marktouched().
539 # marktouched().
540 changed.discard(f)
540 changed.discard(f)
541 if changed:
541 if changed:
542 scmutil.marktouched(self.repo, changed, self.similarity)
542 scmutil.marktouched(self.repo, changed, self.similarity)
543 return sorted(self.changed)
543 return sorted(self.changed)
544
544
545 class filestore(object):
545 class filestore(object):
546 def __init__(self, maxsize=None):
546 def __init__(self, maxsize=None):
547 self.opener = None
547 self.opener = None
548 self.files = {}
548 self.files = {}
549 self.created = 0
549 self.created = 0
550 self.maxsize = maxsize
550 self.maxsize = maxsize
551 if self.maxsize is None:
551 if self.maxsize is None:
552 self.maxsize = 4*(2**20)
552 self.maxsize = 4*(2**20)
553 self.size = 0
553 self.size = 0
554 self.data = {}
554 self.data = {}
555
555
556 def setfile(self, fname, data, mode, copied=None):
556 def setfile(self, fname, data, mode, copied=None):
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
557 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
558 self.data[fname] = (data, mode, copied)
558 self.data[fname] = (data, mode, copied)
559 self.size += len(data)
559 self.size += len(data)
560 else:
560 else:
561 if self.opener is None:
561 if self.opener is None:
562 root = tempfile.mkdtemp(prefix='hg-patch-')
562 root = tempfile.mkdtemp(prefix='hg-patch-')
563 self.opener = vfsmod.vfs(root)
563 self.opener = vfsmod.vfs(root)
564 # Avoid filename issues with these simple names
564 # Avoid filename issues with these simple names
565 fn = str(self.created)
565 fn = str(self.created)
566 self.opener.write(fn, data)
566 self.opener.write(fn, data)
567 self.created += 1
567 self.created += 1
568 self.files[fname] = (fn, mode, copied)
568 self.files[fname] = (fn, mode, copied)
569
569
570 def getfile(self, fname):
570 def getfile(self, fname):
571 if fname in self.data:
571 if fname in self.data:
572 return self.data[fname]
572 return self.data[fname]
573 if not self.opener or fname not in self.files:
573 if not self.opener or fname not in self.files:
574 return None, None, None
574 return None, None, None
575 fn, mode, copied = self.files[fname]
575 fn, mode, copied = self.files[fname]
576 return self.opener.read(fn), mode, copied
576 return self.opener.read(fn), mode, copied
577
577
578 def close(self):
578 def close(self):
579 if self.opener:
579 if self.opener:
580 shutil.rmtree(self.opener.base)
580 shutil.rmtree(self.opener.base)
581
581
582 class repobackend(abstractbackend):
582 class repobackend(abstractbackend):
583 def __init__(self, ui, repo, ctx, store):
583 def __init__(self, ui, repo, ctx, store):
584 super(repobackend, self).__init__(ui)
584 super(repobackend, self).__init__(ui)
585 self.repo = repo
585 self.repo = repo
586 self.ctx = ctx
586 self.ctx = ctx
587 self.store = store
587 self.store = store
588 self.changed = set()
588 self.changed = set()
589 self.removed = set()
589 self.removed = set()
590 self.copied = {}
590 self.copied = {}
591
591
592 def _checkknown(self, fname):
592 def _checkknown(self, fname):
593 if fname not in self.ctx:
593 if fname not in self.ctx:
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
594 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
595
595
596 def getfile(self, fname):
596 def getfile(self, fname):
597 try:
597 try:
598 fctx = self.ctx[fname]
598 fctx = self.ctx[fname]
599 except error.LookupError:
599 except error.LookupError:
600 return None, None
600 return None, None
601 flags = fctx.flags()
601 flags = fctx.flags()
602 return fctx.data(), ('l' in flags, 'x' in flags)
602 return fctx.data(), ('l' in flags, 'x' in flags)
603
603
604 def setfile(self, fname, data, mode, copysource):
604 def setfile(self, fname, data, mode, copysource):
605 if copysource:
605 if copysource:
606 self._checkknown(copysource)
606 self._checkknown(copysource)
607 if data is None:
607 if data is None:
608 data = self.ctx[fname].data()
608 data = self.ctx[fname].data()
609 self.store.setfile(fname, data, mode, copysource)
609 self.store.setfile(fname, data, mode, copysource)
610 self.changed.add(fname)
610 self.changed.add(fname)
611 if copysource:
611 if copysource:
612 self.copied[fname] = copysource
612 self.copied[fname] = copysource
613
613
614 def unlink(self, fname):
614 def unlink(self, fname):
615 self._checkknown(fname)
615 self._checkknown(fname)
616 self.removed.add(fname)
616 self.removed.add(fname)
617
617
618 def exists(self, fname):
618 def exists(self, fname):
619 return fname in self.ctx
619 return fname in self.ctx
620
620
621 def close(self):
621 def close(self):
622 return self.changed | self.removed
622 return self.changed | self.removed
623
623
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
624 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
625 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
626 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
627 eolmodes = ['strict', 'crlf', 'lf', 'auto']
628
628
629 class patchfile(object):
629 class patchfile(object):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
630 def __init__(self, ui, gp, backend, store, eolmode='strict'):
631 self.fname = gp.path
631 self.fname = gp.path
632 self.eolmode = eolmode
632 self.eolmode = eolmode
633 self.eol = None
633 self.eol = None
634 self.backend = backend
634 self.backend = backend
635 self.ui = ui
635 self.ui = ui
636 self.lines = []
636 self.lines = []
637 self.exists = False
637 self.exists = False
638 self.missing = True
638 self.missing = True
639 self.mode = gp.mode
639 self.mode = gp.mode
640 self.copysource = gp.oldpath
640 self.copysource = gp.oldpath
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
641 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
642 self.remove = gp.op == 'DELETE'
642 self.remove = gp.op == 'DELETE'
643 if self.copysource is None:
643 if self.copysource is None:
644 data, mode = backend.getfile(self.fname)
644 data, mode = backend.getfile(self.fname)
645 else:
645 else:
646 data, mode = store.getfile(self.copysource)[:2]
646 data, mode = store.getfile(self.copysource)[:2]
647 if data is not None:
647 if data is not None:
648 self.exists = self.copysource is None or backend.exists(self.fname)
648 self.exists = self.copysource is None or backend.exists(self.fname)
649 self.missing = False
649 self.missing = False
650 if data:
650 if data:
651 self.lines = mdiff.splitnewlines(data)
651 self.lines = mdiff.splitnewlines(data)
652 if self.mode is None:
652 if self.mode is None:
653 self.mode = mode
653 self.mode = mode
654 if self.lines:
654 if self.lines:
655 # Normalize line endings
655 # Normalize line endings
656 if self.lines[0].endswith('\r\n'):
656 if self.lines[0].endswith('\r\n'):
657 self.eol = '\r\n'
657 self.eol = '\r\n'
658 elif self.lines[0].endswith('\n'):
658 elif self.lines[0].endswith('\n'):
659 self.eol = '\n'
659 self.eol = '\n'
660 if eolmode != 'strict':
660 if eolmode != 'strict':
661 nlines = []
661 nlines = []
662 for l in self.lines:
662 for l in self.lines:
663 if l.endswith('\r\n'):
663 if l.endswith('\r\n'):
664 l = l[:-2] + '\n'
664 l = l[:-2] + '\n'
665 nlines.append(l)
665 nlines.append(l)
666 self.lines = nlines
666 self.lines = nlines
667 else:
667 else:
668 if self.create:
668 if self.create:
669 self.missing = False
669 self.missing = False
670 if self.mode is None:
670 if self.mode is None:
671 self.mode = (False, False)
671 self.mode = (False, False)
672 if self.missing:
672 if self.missing:
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
673 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
674 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
674 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
675 "current directory)\n"))
675 "current directory)\n"))
676
676
677 self.hash = {}
677 self.hash = {}
678 self.dirty = 0
678 self.dirty = 0
679 self.offset = 0
679 self.offset = 0
680 self.skew = 0
680 self.skew = 0
681 self.rej = []
681 self.rej = []
682 self.fileprinted = False
682 self.fileprinted = False
683 self.printfile(False)
683 self.printfile(False)
684 self.hunks = 0
684 self.hunks = 0
685
685
686 def writelines(self, fname, lines, mode):
686 def writelines(self, fname, lines, mode):
687 if self.eolmode == 'auto':
687 if self.eolmode == 'auto':
688 eol = self.eol
688 eol = self.eol
689 elif self.eolmode == 'crlf':
689 elif self.eolmode == 'crlf':
690 eol = '\r\n'
690 eol = '\r\n'
691 else:
691 else:
692 eol = '\n'
692 eol = '\n'
693
693
694 if self.eolmode != 'strict' and eol and eol != '\n':
694 if self.eolmode != 'strict' and eol and eol != '\n':
695 rawlines = []
695 rawlines = []
696 for l in lines:
696 for l in lines:
697 if l and l[-1] == '\n':
697 if l and l[-1] == '\n':
698 l = l[:-1] + eol
698 l = l[:-1] + eol
699 rawlines.append(l)
699 rawlines.append(l)
700 lines = rawlines
700 lines = rawlines
701
701
702 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
702 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
703
703
704 def printfile(self, warn):
704 def printfile(self, warn):
705 if self.fileprinted:
705 if self.fileprinted:
706 return
706 return
707 if warn or self.ui.verbose:
707 if warn or self.ui.verbose:
708 self.fileprinted = True
708 self.fileprinted = True
709 s = _("patching file %s\n") % self.fname
709 s = _("patching file %s\n") % self.fname
710 if warn:
710 if warn:
711 self.ui.warn(s)
711 self.ui.warn(s)
712 else:
712 else:
713 self.ui.note(s)
713 self.ui.note(s)
714
714
715
715
716 def findlines(self, l, linenum):
716 def findlines(self, l, linenum):
717 # looks through the hash and finds candidate lines. The
717 # looks through the hash and finds candidate lines. The
718 # result is a list of line numbers sorted based on distance
718 # result is a list of line numbers sorted based on distance
719 # from linenum
719 # from linenum
720
720
721 cand = self.hash.get(l, [])
721 cand = self.hash.get(l, [])
722 if len(cand) > 1:
722 if len(cand) > 1:
723 # resort our list of potentials forward then back.
723 # resort our list of potentials forward then back.
724 cand.sort(key=lambda x: abs(x - linenum))
724 cand.sort(key=lambda x: abs(x - linenum))
725 return cand
725 return cand
726
726
727 def write_rej(self):
727 def write_rej(self):
728 # our rejects are a little different from patch(1). This always
728 # our rejects are a little different from patch(1). This always
729 # creates rejects in the same form as the original patch. A file
729 # creates rejects in the same form as the original patch. A file
730 # header is inserted so that you can run the reject through patch again
730 # header is inserted so that you can run the reject through patch again
731 # without having to type the filename.
731 # without having to type the filename.
732 if not self.rej:
732 if not self.rej:
733 return
733 return
734 base = os.path.basename(self.fname)
734 base = os.path.basename(self.fname)
735 lines = ["--- %s\n+++ %s\n" % (base, base)]
735 lines = ["--- %s\n+++ %s\n" % (base, base)]
736 for x in self.rej:
736 for x in self.rej:
737 for l in x.hunk:
737 for l in x.hunk:
738 lines.append(l)
738 lines.append(l)
739 if l[-1:] != '\n':
739 if l[-1:] != '\n':
740 lines.append("\n\ No newline at end of file\n")
740 lines.append("\n\ No newline at end of file\n")
741 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
741 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
742
742
743 def apply(self, h):
743 def apply(self, h):
744 if not h.complete():
744 if not h.complete():
745 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
745 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
746 (h.number, h.desc, len(h.a), h.lena, len(h.b),
746 (h.number, h.desc, len(h.a), h.lena, len(h.b),
747 h.lenb))
747 h.lenb))
748
748
749 self.hunks += 1
749 self.hunks += 1
750
750
751 if self.missing:
751 if self.missing:
752 self.rej.append(h)
752 self.rej.append(h)
753 return -1
753 return -1
754
754
755 if self.exists and self.create:
755 if self.exists and self.create:
756 if self.copysource:
756 if self.copysource:
757 self.ui.warn(_("cannot create %s: destination already "
757 self.ui.warn(_("cannot create %s: destination already "
758 "exists\n") % self.fname)
758 "exists\n") % self.fname)
759 else:
759 else:
760 self.ui.warn(_("file %s already exists\n") % self.fname)
760 self.ui.warn(_("file %s already exists\n") % self.fname)
761 self.rej.append(h)
761 self.rej.append(h)
762 return -1
762 return -1
763
763
764 if isinstance(h, binhunk):
764 if isinstance(h, binhunk):
765 if self.remove:
765 if self.remove:
766 self.backend.unlink(self.fname)
766 self.backend.unlink(self.fname)
767 else:
767 else:
768 l = h.new(self.lines)
768 l = h.new(self.lines)
769 self.lines[:] = l
769 self.lines[:] = l
770 self.offset += len(l)
770 self.offset += len(l)
771 self.dirty = True
771 self.dirty = True
772 return 0
772 return 0
773
773
774 horig = h
774 horig = h
775 if (self.eolmode in ('crlf', 'lf')
775 if (self.eolmode in ('crlf', 'lf')
776 or self.eolmode == 'auto' and self.eol):
776 or self.eolmode == 'auto' and self.eol):
777 # If new eols are going to be normalized, then normalize
777 # If new eols are going to be normalized, then normalize
778 # hunk data before patching. Otherwise, preserve input
778 # hunk data before patching. Otherwise, preserve input
779 # line-endings.
779 # line-endings.
780 h = h.getnormalized()
780 h = h.getnormalized()
781
781
782 # fast case first, no offsets, no fuzz
782 # fast case first, no offsets, no fuzz
783 old, oldstart, new, newstart = h.fuzzit(0, False)
783 old, oldstart, new, newstart = h.fuzzit(0, False)
784 oldstart += self.offset
784 oldstart += self.offset
785 orig_start = oldstart
785 orig_start = oldstart
786 # if there's skew we want to emit the "(offset %d lines)" even
786 # if there's skew we want to emit the "(offset %d lines)" even
787 # when the hunk cleanly applies at start + skew, so skip the
787 # when the hunk cleanly applies at start + skew, so skip the
788 # fast case code
788 # fast case code
789 if (self.skew == 0 and
789 if (self.skew == 0 and
790 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
790 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
791 if self.remove:
791 if self.remove:
792 self.backend.unlink(self.fname)
792 self.backend.unlink(self.fname)
793 else:
793 else:
794 self.lines[oldstart:oldstart + len(old)] = new
794 self.lines[oldstart:oldstart + len(old)] = new
795 self.offset += len(new) - len(old)
795 self.offset += len(new) - len(old)
796 self.dirty = True
796 self.dirty = True
797 return 0
797 return 0
798
798
799 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
799 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
800 self.hash = {}
800 self.hash = {}
801 for x, s in enumerate(self.lines):
801 for x, s in enumerate(self.lines):
802 self.hash.setdefault(s, []).append(x)
802 self.hash.setdefault(s, []).append(x)
803
803
804 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
804 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
805 for toponly in [True, False]:
805 for toponly in [True, False]:
806 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
806 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
807 oldstart = oldstart + self.offset + self.skew
807 oldstart = oldstart + self.offset + self.skew
808 oldstart = min(oldstart, len(self.lines))
808 oldstart = min(oldstart, len(self.lines))
809 if old:
809 if old:
810 cand = self.findlines(old[0][1:], oldstart)
810 cand = self.findlines(old[0][1:], oldstart)
811 else:
811 else:
812 # Only adding lines with no or fuzzed context, just
812 # Only adding lines with no or fuzzed context, just
813 # take the skew in account
813 # take the skew in account
814 cand = [oldstart]
814 cand = [oldstart]
815
815
816 for l in cand:
816 for l in cand:
817 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
817 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
818 self.lines[l : l + len(old)] = new
818 self.lines[l : l + len(old)] = new
819 self.offset += len(new) - len(old)
819 self.offset += len(new) - len(old)
820 self.skew = l - orig_start
820 self.skew = l - orig_start
821 self.dirty = True
821 self.dirty = True
822 offset = l - orig_start - fuzzlen
822 offset = l - orig_start - fuzzlen
823 if fuzzlen:
823 if fuzzlen:
824 msg = _("Hunk #%d succeeded at %d "
824 msg = _("Hunk #%d succeeded at %d "
825 "with fuzz %d "
825 "with fuzz %d "
826 "(offset %d lines).\n")
826 "(offset %d lines).\n")
827 self.printfile(True)
827 self.printfile(True)
828 self.ui.warn(msg %
828 self.ui.warn(msg %
829 (h.number, l + 1, fuzzlen, offset))
829 (h.number, l + 1, fuzzlen, offset))
830 else:
830 else:
831 msg = _("Hunk #%d succeeded at %d "
831 msg = _("Hunk #%d succeeded at %d "
832 "(offset %d lines).\n")
832 "(offset %d lines).\n")
833 self.ui.note(msg % (h.number, l + 1, offset))
833 self.ui.note(msg % (h.number, l + 1, offset))
834 return fuzzlen
834 return fuzzlen
835 self.printfile(True)
835 self.printfile(True)
836 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
836 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
837 self.rej.append(horig)
837 self.rej.append(horig)
838 return -1
838 return -1
839
839
840 def close(self):
840 def close(self):
841 if self.dirty:
841 if self.dirty:
842 self.writelines(self.fname, self.lines, self.mode)
842 self.writelines(self.fname, self.lines, self.mode)
843 self.write_rej()
843 self.write_rej()
844 return len(self.rej)
844 return len(self.rej)
845
845
846 class header(object):
846 class header(object):
847 """patch header
847 """patch header
848 """
848 """
849 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
849 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
850 diff_re = re.compile('diff -r .* (.*)$')
850 diff_re = re.compile('diff -r .* (.*)$')
851 allhunks_re = re.compile('(?:index|deleted file) ')
851 allhunks_re = re.compile('(?:index|deleted file) ')
852 pretty_re = re.compile('(?:new file|deleted file) ')
852 pretty_re = re.compile('(?:new file|deleted file) ')
853 special_re = re.compile('(?:index|deleted|copy|rename) ')
853 special_re = re.compile('(?:index|deleted|copy|rename) ')
854 newfile_re = re.compile('(?:new file)')
854 newfile_re = re.compile('(?:new file)')
855
855
856 def __init__(self, header):
856 def __init__(self, header):
857 self.header = header
857 self.header = header
858 self.hunks = []
858 self.hunks = []
859
859
860 def binary(self):
860 def binary(self):
861 return any(h.startswith('index ') for h in self.header)
861 return any(h.startswith('index ') for h in self.header)
862
862
863 def pretty(self, fp):
863 def pretty(self, fp):
864 for h in self.header:
864 for h in self.header:
865 if h.startswith('index '):
865 if h.startswith('index '):
866 fp.write(_('this modifies a binary file (all or nothing)\n'))
866 fp.write(_('this modifies a binary file (all or nothing)\n'))
867 break
867 break
868 if self.pretty_re.match(h):
868 if self.pretty_re.match(h):
869 fp.write(h)
869 fp.write(h)
870 if self.binary():
870 if self.binary():
871 fp.write(_('this is a binary file\n'))
871 fp.write(_('this is a binary file\n'))
872 break
872 break
873 if h.startswith('---'):
873 if h.startswith('---'):
874 fp.write(_('%d hunks, %d lines changed\n') %
874 fp.write(_('%d hunks, %d lines changed\n') %
875 (len(self.hunks),
875 (len(self.hunks),
876 sum([max(h.added, h.removed) for h in self.hunks])))
876 sum([max(h.added, h.removed) for h in self.hunks])))
877 break
877 break
878 fp.write(h)
878 fp.write(h)
879
879
880 def write(self, fp):
880 def write(self, fp):
881 fp.write(''.join(self.header))
881 fp.write(''.join(self.header))
882
882
883 def allhunks(self):
883 def allhunks(self):
884 return any(self.allhunks_re.match(h) for h in self.header)
884 return any(self.allhunks_re.match(h) for h in self.header)
885
885
886 def files(self):
886 def files(self):
887 match = self.diffgit_re.match(self.header[0])
887 match = self.diffgit_re.match(self.header[0])
888 if match:
888 if match:
889 fromfile, tofile = match.groups()
889 fromfile, tofile = match.groups()
890 if fromfile == tofile:
890 if fromfile == tofile:
891 return [fromfile]
891 return [fromfile]
892 return [fromfile, tofile]
892 return [fromfile, tofile]
893 else:
893 else:
894 return self.diff_re.match(self.header[0]).groups()
894 return self.diff_re.match(self.header[0]).groups()
895
895
896 def filename(self):
896 def filename(self):
897 return self.files()[-1]
897 return self.files()[-1]
898
898
899 def __repr__(self):
899 def __repr__(self):
900 return '<header %s>' % (' '.join(map(repr, self.files())))
900 return '<header %s>' % (' '.join(map(repr, self.files())))
901
901
902 def isnewfile(self):
902 def isnewfile(self):
903 return any(self.newfile_re.match(h) for h in self.header)
903 return any(self.newfile_re.match(h) for h in self.header)
904
904
905 def special(self):
905 def special(self):
906 # Special files are shown only at the header level and not at the hunk
906 # Special files are shown only at the header level and not at the hunk
907 # level for example a file that has been deleted is a special file.
907 # level for example a file that has been deleted is a special file.
908 # The user cannot change the content of the operation, in the case of
908 # The user cannot change the content of the operation, in the case of
909 # the deleted file he has to take the deletion or not take it, he
909 # the deleted file he has to take the deletion or not take it, he
910 # cannot take some of it.
910 # cannot take some of it.
911 # Newly added files are special if they are empty, they are not special
911 # Newly added files are special if they are empty, they are not special
912 # if they have some content as we want to be able to change it
912 # if they have some content as we want to be able to change it
913 nocontent = len(self.header) == 2
913 nocontent = len(self.header) == 2
914 emptynewfile = self.isnewfile() and nocontent
914 emptynewfile = self.isnewfile() and nocontent
915 return emptynewfile or \
915 return emptynewfile or \
916 any(self.special_re.match(h) for h in self.header)
916 any(self.special_re.match(h) for h in self.header)
917
917
918 class recordhunk(object):
918 class recordhunk(object):
919 """patch hunk
919 """patch hunk
920
920
921 XXX shouldn't we merge this with the other hunk class?
921 XXX shouldn't we merge this with the other hunk class?
922 """
922 """
923
923
924 def __init__(self, header, fromline, toline, proc, before, hunk, after,
924 def __init__(self, header, fromline, toline, proc, before, hunk, after,
925 maxcontext=None):
925 maxcontext=None):
926 def trimcontext(lines, reverse=False):
926 def trimcontext(lines, reverse=False):
927 if maxcontext is not None:
927 if maxcontext is not None:
928 delta = len(lines) - maxcontext
928 delta = len(lines) - maxcontext
929 if delta > 0:
929 if delta > 0:
930 if reverse:
930 if reverse:
931 return delta, lines[delta:]
931 return delta, lines[delta:]
932 else:
932 else:
933 return delta, lines[:maxcontext]
933 return delta, lines[:maxcontext]
934 return 0, lines
934 return 0, lines
935
935
936 self.header = header
936 self.header = header
937 trimedbefore, self.before = trimcontext(before, True)
937 trimedbefore, self.before = trimcontext(before, True)
938 self.fromline = fromline + trimedbefore
938 self.fromline = fromline + trimedbefore
939 self.toline = toline + trimedbefore
939 self.toline = toline + trimedbefore
940 _trimedafter, self.after = trimcontext(after, False)
940 _trimedafter, self.after = trimcontext(after, False)
941 self.proc = proc
941 self.proc = proc
942 self.hunk = hunk
942 self.hunk = hunk
943 self.added, self.removed = self.countchanges(self.hunk)
943 self.added, self.removed = self.countchanges(self.hunk)
944
944
945 def __eq__(self, v):
945 def __eq__(self, v):
946 if not isinstance(v, recordhunk):
946 if not isinstance(v, recordhunk):
947 return False
947 return False
948
948
949 return ((v.hunk == self.hunk) and
949 return ((v.hunk == self.hunk) and
950 (v.proc == self.proc) and
950 (v.proc == self.proc) and
951 (self.fromline == v.fromline) and
951 (self.fromline == v.fromline) and
952 (self.header.files() == v.header.files()))
952 (self.header.files() == v.header.files()))
953
953
954 def __hash__(self):
954 def __hash__(self):
955 return hash((tuple(self.hunk),
955 return hash((tuple(self.hunk),
956 tuple(self.header.files()),
956 tuple(self.header.files()),
957 self.fromline,
957 self.fromline,
958 self.proc))
958 self.proc))
959
959
960 def countchanges(self, hunk):
960 def countchanges(self, hunk):
961 """hunk -> (n+,n-)"""
961 """hunk -> (n+,n-)"""
962 add = len([h for h in hunk if h.startswith('+')])
962 add = len([h for h in hunk if h.startswith('+')])
963 rem = len([h for h in hunk if h.startswith('-')])
963 rem = len([h for h in hunk if h.startswith('-')])
964 return add, rem
964 return add, rem
965
965
966 def reversehunk(self):
966 def reversehunk(self):
967 """return another recordhunk which is the reverse of the hunk
967 """return another recordhunk which is the reverse of the hunk
968
968
969 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
969 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
970 that, swap fromline/toline and +/- signs while keep other things
970 that, swap fromline/toline and +/- signs while keep other things
971 unchanged.
971 unchanged.
972 """
972 """
973 m = {'+': '-', '-': '+', '\\': '\\'}
973 m = {'+': '-', '-': '+', '\\': '\\'}
974 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
974 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
975 return recordhunk(self.header, self.toline, self.fromline, self.proc,
975 return recordhunk(self.header, self.toline, self.fromline, self.proc,
976 self.before, hunk, self.after)
976 self.before, hunk, self.after)
977
977
978 def write(self, fp):
978 def write(self, fp):
979 delta = len(self.before) + len(self.after)
979 delta = len(self.before) + len(self.after)
980 if self.after and self.after[-1] == '\\ No newline at end of file\n':
980 if self.after and self.after[-1] == '\\ No newline at end of file\n':
981 delta -= 1
981 delta -= 1
982 fromlen = delta + self.removed
982 fromlen = delta + self.removed
983 tolen = delta + self.added
983 tolen = delta + self.added
984 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
984 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
985 (self.fromline, fromlen, self.toline, tolen,
985 (self.fromline, fromlen, self.toline, tolen,
986 self.proc and (' ' + self.proc)))
986 self.proc and (' ' + self.proc)))
987 fp.write(''.join(self.before + self.hunk + self.after))
987 fp.write(''.join(self.before + self.hunk + self.after))
988
988
989 pretty = write
989 pretty = write
990
990
991 def filename(self):
991 def filename(self):
992 return self.header.filename()
992 return self.header.filename()
993
993
994 def __repr__(self):
994 def __repr__(self):
995 return '<hunk %r@%d>' % (self.filename(), self.fromline)
995 return '<hunk %r@%d>' % (self.filename(), self.fromline)
996
996
997 def getmessages():
997 def getmessages():
998 return {
998 return {
999 'multiple': {
999 'multiple': {
1000 'discard': _("discard change %d/%d to '%s'?"),
1000 'discard': _("discard change %d/%d to '%s'?"),
1001 'record': _("record change %d/%d to '%s'?"),
1001 'record': _("record change %d/%d to '%s'?"),
1002 'revert': _("revert change %d/%d to '%s'?"),
1002 'revert': _("revert change %d/%d to '%s'?"),
1003 },
1003 },
1004 'single': {
1004 'single': {
1005 'discard': _("discard this change to '%s'?"),
1005 'discard': _("discard this change to '%s'?"),
1006 'record': _("record this change to '%s'?"),
1006 'record': _("record this change to '%s'?"),
1007 'revert': _("revert this change to '%s'?"),
1007 'revert': _("revert this change to '%s'?"),
1008 },
1008 },
1009 'help': {
1009 'help': {
1010 'discard': _('[Ynesfdaq?]'
1010 'discard': _('[Ynesfdaq?]'
1011 '$$ &Yes, discard this change'
1011 '$$ &Yes, discard this change'
1012 '$$ &No, skip this change'
1012 '$$ &No, skip this change'
1013 '$$ &Edit this change manually'
1013 '$$ &Edit this change manually'
1014 '$$ &Skip remaining changes to this file'
1014 '$$ &Skip remaining changes to this file'
1015 '$$ Discard remaining changes to this &file'
1015 '$$ Discard remaining changes to this &file'
1016 '$$ &Done, skip remaining changes and files'
1016 '$$ &Done, skip remaining changes and files'
1017 '$$ Discard &all changes to all remaining files'
1017 '$$ Discard &all changes to all remaining files'
1018 '$$ &Quit, discarding no changes'
1018 '$$ &Quit, discarding no changes'
1019 '$$ &? (display help)'),
1019 '$$ &? (display help)'),
1020 'record': _('[Ynesfdaq?]'
1020 'record': _('[Ynesfdaq?]'
1021 '$$ &Yes, record this change'
1021 '$$ &Yes, record this change'
1022 '$$ &No, skip this change'
1022 '$$ &No, skip this change'
1023 '$$ &Edit this change manually'
1023 '$$ &Edit this change manually'
1024 '$$ &Skip remaining changes to this file'
1024 '$$ &Skip remaining changes to this file'
1025 '$$ Record remaining changes to this &file'
1025 '$$ Record remaining changes to this &file'
1026 '$$ &Done, skip remaining changes and files'
1026 '$$ &Done, skip remaining changes and files'
1027 '$$ Record &all changes to all remaining files'
1027 '$$ Record &all changes to all remaining files'
1028 '$$ &Quit, recording no changes'
1028 '$$ &Quit, recording no changes'
1029 '$$ &? (display help)'),
1029 '$$ &? (display help)'),
1030 'revert': _('[Ynesfdaq?]'
1030 'revert': _('[Ynesfdaq?]'
1031 '$$ &Yes, revert this change'
1031 '$$ &Yes, revert this change'
1032 '$$ &No, skip this change'
1032 '$$ &No, skip this change'
1033 '$$ &Edit this change manually'
1033 '$$ &Edit this change manually'
1034 '$$ &Skip remaining changes to this file'
1034 '$$ &Skip remaining changes to this file'
1035 '$$ Revert remaining changes to this &file'
1035 '$$ Revert remaining changes to this &file'
1036 '$$ &Done, skip remaining changes and files'
1036 '$$ &Done, skip remaining changes and files'
1037 '$$ Revert &all changes to all remaining files'
1037 '$$ Revert &all changes to all remaining files'
1038 '$$ &Quit, reverting no changes'
1038 '$$ &Quit, reverting no changes'
1039 '$$ &? (display help)')
1039 '$$ &? (display help)')
1040 }
1040 }
1041 }
1041 }
1042
1042
1043 def filterpatch(ui, headers, operation=None):
1043 def filterpatch(ui, headers, operation=None):
1044 """Interactively filter patch chunks into applied-only chunks"""
1044 """Interactively filter patch chunks into applied-only chunks"""
1045 messages = getmessages()
1045 messages = getmessages()
1046
1046
1047 if operation is None:
1047 if operation is None:
1048 operation = 'record'
1048 operation = 'record'
1049
1049
1050 def prompt(skipfile, skipall, query, chunk):
1050 def prompt(skipfile, skipall, query, chunk):
1051 """prompt query, and process base inputs
1051 """prompt query, and process base inputs
1052
1052
1053 - y/n for the rest of file
1053 - y/n for the rest of file
1054 - y/n for the rest
1054 - y/n for the rest
1055 - ? (help)
1055 - ? (help)
1056 - q (quit)
1056 - q (quit)
1057
1057
1058 Return True/False and possibly updated skipfile and skipall.
1058 Return True/False and possibly updated skipfile and skipall.
1059 """
1059 """
1060 newpatches = None
1060 newpatches = None
1061 if skipall is not None:
1061 if skipall is not None:
1062 return skipall, skipfile, skipall, newpatches
1062 return skipall, skipfile, skipall, newpatches
1063 if skipfile is not None:
1063 if skipfile is not None:
1064 return skipfile, skipfile, skipall, newpatches
1064 return skipfile, skipfile, skipall, newpatches
1065 while True:
1065 while True:
1066 resps = messages['help'][operation]
1066 resps = messages['help'][operation]
1067 r = ui.promptchoice("%s %s" % (query, resps))
1067 r = ui.promptchoice("%s %s" % (query, resps))
1068 ui.write("\n")
1068 ui.write("\n")
1069 if r == 8: # ?
1069 if r == 8: # ?
1070 for c, t in ui.extractchoices(resps)[1]:
1070 for c, t in ui.extractchoices(resps)[1]:
1071 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1071 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1072 continue
1072 continue
1073 elif r == 0: # yes
1073 elif r == 0: # yes
1074 ret = True
1074 ret = True
1075 elif r == 1: # no
1075 elif r == 1: # no
1076 ret = False
1076 ret = False
1077 elif r == 2: # Edit patch
1077 elif r == 2: # Edit patch
1078 if chunk is None:
1078 if chunk is None:
1079 ui.write(_('cannot edit patch for whole file'))
1079 ui.write(_('cannot edit patch for whole file'))
1080 ui.write("\n")
1080 ui.write("\n")
1081 continue
1081 continue
1082 if chunk.header.binary():
1082 if chunk.header.binary():
1083 ui.write(_('cannot edit patch for binary file'))
1083 ui.write(_('cannot edit patch for binary file'))
1084 ui.write("\n")
1084 ui.write("\n")
1085 continue
1085 continue
1086 # Patch comment based on the Git one (based on comment at end of
1086 # Patch comment based on the Git one (based on comment at end of
1087 # https://mercurial-scm.org/wiki/RecordExtension)
1087 # https://mercurial-scm.org/wiki/RecordExtension)
1088 phelp = '---' + _("""
1088 phelp = '---' + _("""
1089 To remove '-' lines, make them ' ' lines (context).
1089 To remove '-' lines, make them ' ' lines (context).
1090 To remove '+' lines, delete them.
1090 To remove '+' lines, delete them.
1091 Lines starting with # will be removed from the patch.
1091 Lines starting with # will be removed from the patch.
1092
1092
1093 If the patch applies cleanly, the edited hunk will immediately be
1093 If the patch applies cleanly, the edited hunk will immediately be
1094 added to the record list. If it does not apply cleanly, a rejects
1094 added to the record list. If it does not apply cleanly, a rejects
1095 file will be generated: you can use that when you try again. If
1095 file will be generated: you can use that when you try again. If
1096 all lines of the hunk are removed, then the edit is aborted and
1096 all lines of the hunk are removed, then the edit is aborted and
1097 the hunk is left unchanged.
1097 the hunk is left unchanged.
1098 """)
1098 """)
1099 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1099 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1100 suffix=".diff", text=True)
1100 suffix=".diff", text=True)
1101 ncpatchfp = None
1101 ncpatchfp = None
1102 try:
1102 try:
1103 # Write the initial patch
1103 # Write the initial patch
1104 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1104 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1105 chunk.header.write(f)
1105 chunk.header.write(f)
1106 chunk.write(f)
1106 chunk.write(f)
1107 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1107 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1108 f.close()
1108 f.close()
1109 # Start the editor and wait for it to complete
1109 # Start the editor and wait for it to complete
1110 editor = ui.geteditor()
1110 editor = ui.geteditor()
1111 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1111 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1112 environ={'HGUSER': ui.username()},
1112 environ={'HGUSER': ui.username()},
1113 blockedtag='filterpatch')
1113 blockedtag='filterpatch')
1114 if ret != 0:
1114 if ret != 0:
1115 ui.warn(_("editor exited with exit code %d\n") % ret)
1115 ui.warn(_("editor exited with exit code %d\n") % ret)
1116 continue
1116 continue
1117 # Remove comment lines
1117 # Remove comment lines
1118 patchfp = open(patchfn)
1118 patchfp = open(patchfn)
1119 ncpatchfp = stringio()
1119 ncpatchfp = stringio()
1120 for line in util.iterfile(patchfp):
1120 for line in util.iterfile(patchfp):
1121 if not line.startswith('#'):
1121 if not line.startswith('#'):
1122 ncpatchfp.write(line)
1122 ncpatchfp.write(line)
1123 patchfp.close()
1123 patchfp.close()
1124 ncpatchfp.seek(0)
1124 ncpatchfp.seek(0)
1125 newpatches = parsepatch(ncpatchfp)
1125 newpatches = parsepatch(ncpatchfp)
1126 finally:
1126 finally:
1127 os.unlink(patchfn)
1127 os.unlink(patchfn)
1128 del ncpatchfp
1128 del ncpatchfp
1129 # Signal that the chunk shouldn't be applied as-is, but
1129 # Signal that the chunk shouldn't be applied as-is, but
1130 # provide the new patch to be used instead.
1130 # provide the new patch to be used instead.
1131 ret = False
1131 ret = False
1132 elif r == 3: # Skip
1132 elif r == 3: # Skip
1133 ret = skipfile = False
1133 ret = skipfile = False
1134 elif r == 4: # file (Record remaining)
1134 elif r == 4: # file (Record remaining)
1135 ret = skipfile = True
1135 ret = skipfile = True
1136 elif r == 5: # done, skip remaining
1136 elif r == 5: # done, skip remaining
1137 ret = skipall = False
1137 ret = skipall = False
1138 elif r == 6: # all
1138 elif r == 6: # all
1139 ret = skipall = True
1139 ret = skipall = True
1140 elif r == 7: # quit
1140 elif r == 7: # quit
1141 raise error.Abort(_('user quit'))
1141 raise error.Abort(_('user quit'))
1142 return ret, skipfile, skipall, newpatches
1142 return ret, skipfile, skipall, newpatches
1143
1143
1144 seen = set()
1144 seen = set()
1145 applied = {} # 'filename' -> [] of chunks
1145 applied = {} # 'filename' -> [] of chunks
1146 skipfile, skipall = None, None
1146 skipfile, skipall = None, None
1147 pos, total = 1, sum(len(h.hunks) for h in headers)
1147 pos, total = 1, sum(len(h.hunks) for h in headers)
1148 for h in headers:
1148 for h in headers:
1149 pos += len(h.hunks)
1149 pos += len(h.hunks)
1150 skipfile = None
1150 skipfile = None
1151 fixoffset = 0
1151 fixoffset = 0
1152 hdr = ''.join(h.header)
1152 hdr = ''.join(h.header)
1153 if hdr in seen:
1153 if hdr in seen:
1154 continue
1154 continue
1155 seen.add(hdr)
1155 seen.add(hdr)
1156 if skipall is None:
1156 if skipall is None:
1157 h.pretty(ui)
1157 h.pretty(ui)
1158 msg = (_('examine changes to %s?') %
1158 msg = (_('examine changes to %s?') %
1159 _(' and ').join("'%s'" % f for f in h.files()))
1159 _(' and ').join("'%s'" % f for f in h.files()))
1160 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1160 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1161 if not r:
1161 if not r:
1162 continue
1162 continue
1163 applied[h.filename()] = [h]
1163 applied[h.filename()] = [h]
1164 if h.allhunks():
1164 if h.allhunks():
1165 applied[h.filename()] += h.hunks
1165 applied[h.filename()] += h.hunks
1166 continue
1166 continue
1167 for i, chunk in enumerate(h.hunks):
1167 for i, chunk in enumerate(h.hunks):
1168 if skipfile is None and skipall is None:
1168 if skipfile is None and skipall is None:
1169 chunk.pretty(ui)
1169 chunk.pretty(ui)
1170 if total == 1:
1170 if total == 1:
1171 msg = messages['single'][operation] % chunk.filename()
1171 msg = messages['single'][operation] % chunk.filename()
1172 else:
1172 else:
1173 idx = pos - len(h.hunks) + i
1173 idx = pos - len(h.hunks) + i
1174 msg = messages['multiple'][operation] % (idx, total,
1174 msg = messages['multiple'][operation] % (idx, total,
1175 chunk.filename())
1175 chunk.filename())
1176 r, skipfile, skipall, newpatches = prompt(skipfile,
1176 r, skipfile, skipall, newpatches = prompt(skipfile,
1177 skipall, msg, chunk)
1177 skipall, msg, chunk)
1178 if r:
1178 if r:
1179 if fixoffset:
1179 if fixoffset:
1180 chunk = copy.copy(chunk)
1180 chunk = copy.copy(chunk)
1181 chunk.toline += fixoffset
1181 chunk.toline += fixoffset
1182 applied[chunk.filename()].append(chunk)
1182 applied[chunk.filename()].append(chunk)
1183 elif newpatches is not None:
1183 elif newpatches is not None:
1184 for newpatch in newpatches:
1184 for newpatch in newpatches:
1185 for newhunk in newpatch.hunks:
1185 for newhunk in newpatch.hunks:
1186 if fixoffset:
1186 if fixoffset:
1187 newhunk.toline += fixoffset
1187 newhunk.toline += fixoffset
1188 applied[newhunk.filename()].append(newhunk)
1188 applied[newhunk.filename()].append(newhunk)
1189 else:
1189 else:
1190 fixoffset += chunk.removed - chunk.added
1190 fixoffset += chunk.removed - chunk.added
1191 return (sum([h for h in applied.itervalues()
1191 return (sum([h for h in applied.itervalues()
1192 if h[0].special() or len(h) > 1], []), {})
1192 if h[0].special() or len(h) > 1], []), {})
class hunk(object):
    """One hunk of a text patch, in unified or context format.

    self.hunk holds the raw hunk lines (starting with the range
    description line), self.a the old-side lines (still carrying their
    '-'/' ' prefix) and self.b the new-side lines (prefix stripped).
    The range information is kept in starta/lena (old file) and
    startb/lenb (new file).
    """
    def __init__(self, desc, num, lr, context):
        # desc: the range description line ('@@ ...' or '*** ...')
        # num: ordinal of this hunk, used in error messages
        # lr: linereader positioned on the hunk body, or None to build an
        #     empty shell (see getnormalized)
        # context: truthy when the input is a context diff
        self.number = num
        self.desc = desc
        self.hunk = [desc]
        self.a = []
        self.b = []
        self.starta = self.lena = None
        self.startb = self.lenb = None
        if lr is not None:
            if context:
                self.read_context_hunk(lr)
            else:
                self.read_unified_hunk(lr)

    def getnormalized(self):
        """Return a copy with line endings normalized to LF."""

        def normalize(lines):
            # rewrite CRLF endings as bare LF, leaving other lines alone
            nlines = []
            for line in lines:
                if line.endswith('\r\n'):
                    line = line[:-2] + '\n'
                nlines.append(line)
            return nlines

        # Dummy object, it is rebuilt manually
        nh = hunk(self.desc, self.number, None, None)
        nh.number = self.number
        nh.desc = self.desc
        nh.hunk = self.hunk
        nh.a = normalize(self.a)
        nh.b = normalize(self.b)
        nh.starta = self.starta
        nh.startb = self.startb
        nh.lena = self.lena
        nh.lenb = self.lenb
        return nh

    def read_unified_hunk(self, lr):
        # parse '@@ -starta[,lena] +startb[,lenb] @@'; a missing length
        # defaults to 1
        m = unidesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, self.lena, self.startb, self.lenb = m.groups()
        if self.lena is None:
            self.lena = 1
        else:
            self.lena = int(self.lena)
        if self.lenb is None:
            self.lenb = 1
        else:
            self.lenb = int(self.lenb)
        self.starta = int(self.starta)
        self.startb = int(self.startb)
        # consume the hunk body, filling self.a/self.b and appending the
        # raw lines to self.hunk
        diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
                             self.b)
        # if we hit eof before finishing out the hunk, the last line will
        # be zero length. Lets try to fix it up.
        while len(self.hunk[-1]) == 0:
            del self.hunk[-1]
            del self.a[-1]
            del self.b[-1]
            self.lena -= 1
            self.lenb -= 1
        self._fixnewline(lr)

    def read_context_hunk(self, lr):
        # Parse a context-format hunk, converting it on the fly into the
        # unified representation used everywhere else (self.a/self.b plus
        # a rebuilt '@@ ...' description line).
        self.desc = lr.readline()
        m = contextdesc.match(self.desc)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.starta, aend = m.groups()
        self.starta = int(self.starta)
        if aend is None:
            aend = self.starta
        self.lena = int(aend) - self.starta
        if self.starta:
            self.lena += 1
        # old side: '- ' and '! ' lines become removals, '  ' stays context
        for x in xrange(self.lena):
            l = lr.readline()
            if l.startswith('---'):
                # lines addition, old block is empty
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('- ') or l.startswith('! '):
                u = '-' + s
            elif l.startswith('  '):
                u = ' ' + s
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.a.append(u)
            self.hunk.append(u)

        l = lr.readline()
        if l.startswith('\ '):
            # '\ No newline at end of file': drop the newline we stored
            # for the last old-side line
            s = self.a[-1][:-1]
            self.a[-1] = s
            self.hunk[-1] = s
            l = lr.readline()
        m = contextdesc.match(l)
        if not m:
            raise PatchError(_("bad hunk #%d") % self.number)
        self.startb, bend = m.groups()
        self.startb = int(self.startb)
        if bend is None:
            bend = self.startb
        self.lenb = int(bend) - self.startb
        if self.startb:
            self.lenb += 1
        hunki = 1
        # new side: '+ ' and '! ' lines become additions; context lines are
        # merged with the old-side lines already present in self.hunk
        for x in xrange(self.lenb):
            l = lr.readline()
            if l.startswith('\ '):
                # XXX: the only way to hit this is with an invalid line range.
                # The no-eol marker is not counted in the line range, but I
                # guess there are diff(1) out there which behave differently.
                s = self.b[-1][:-1]
                self.b[-1] = s
                self.hunk[hunki - 1] = s
                continue
            if not l:
                # line deletions, new block is empty and we hit EOF
                lr.push(l)
                break
            s = l[2:]
            if l.startswith('+ ') or l.startswith('! '):
                u = '+' + s
            elif l.startswith('  '):
                u = ' ' + s
            elif len(self.b) == 0:
                # line deletions, new block is empty
                lr.push(l)
                break
            else:
                raise PatchError(_("bad hunk #%d old text line %d") %
                                 (self.number, x))
            self.b.append(s)
            # interleave the new-side line into self.hunk at the correct
            # position, skipping over removal lines already recorded
            while True:
                if hunki >= len(self.hunk):
                    h = ""
                else:
                    h = self.hunk[hunki]
                hunki += 1
                if h == u:
                    break
                elif h.startswith('-'):
                    continue
                else:
                    self.hunk.insert(hunki - 1, u)
                    break

        if not self.a:
            # this happens when lines were only added to the hunk
            for x in self.hunk:
                if x.startswith('-') or x.startswith(' '):
                    self.a.append(x)
        if not self.b:
            # this happens when lines were only deleted from the hunk
            for x in self.hunk:
                if x.startswith('+') or x.startswith(' '):
                    self.b.append(x[1:])
        # @@ -start,len +start,len @@
        self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
                                               self.startb, self.lenb)
        self.hunk[0] = self.desc
        self._fixnewline(lr)

    def _fixnewline(self, lr):
        # Handle a trailing '\ No newline at end of file' marker; any other
        # line is pushed back for the caller.
        l = lr.readline()
        if l.startswith('\ '):
            diffhelpers.fix_newline(self.hunk, self.a, self.b)
        else:
            lr.push(l)

    def complete(self):
        # True when the parsed body length matches what the range announced
        return len(self.a) == self.lena and len(self.b) == self.lenb

    def _fuzzit(self, old, new, fuzz, toponly):
        # this removes context lines from the top and bottom of list 'l'. It
        # checks the hunk to make sure only context lines are removed, and then
        # returns a new shortened list of lines.
        fuzz = min(fuzz, len(old))
        if fuzz:
            top = 0
            bot = 0
            hlen = len(self.hunk)
            for x in xrange(hlen - 1):
                # the hunk starts with the @@ line, so use x+1
                if self.hunk[x + 1][0] == ' ':
                    top += 1
                else:
                    break
            if not toponly:
                for x in xrange(hlen - 1):
                    if self.hunk[hlen - bot - 1][0] == ' ':
                        bot += 1
                    else:
                        break

            bot = min(fuzz, bot)
            top = min(fuzz, top)
            return old[top:len(old) - bot], new[top:len(new) - bot], top
        return old, new, 0

    def fuzzit(self, fuzz, toponly):
        # Fuzz the hunk and return (old, oldstart, new, newstart) with the
        # start offsets adjusted for the context dropped from the top.
        old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
        oldstart = self.starta + top
        newstart = self.startb + top
        # zero length hunk ranges already have their start decremented
        if self.lena and oldstart > 0:
            oldstart -= 1
        if self.lenb and newstart > 0:
            newstart -= 1
        return old, oldstart, new, newstart
1409
1409
class binhunk(object):
    'A binary patch file.'
    def __init__(self, lr, fname):
        # decoded payload; stays None until _read() succeeds
        self.text = None
        # True when the payload is a delta against the old content rather
        # than the full ('literal') content
        self.delta = False
        # raw lines of the hunk, kept for re-emitting the patch
        self.hunk = ['GIT binary patch\n']
        self._fname = fname
        self._read(lr)

    def complete(self):
        # the hunk parsed fully iff _read() managed to set self.text
        return self.text is not None

    def new(self, lines):
        # Return the new file content; for a delta, apply it to the old
        # content joined from 'lines'.
        if self.delta:
            return [applybindelta(self.text, ''.join(lines))]
        return [self.text]

    def _read(self, lr):
        def getline(lr, hunk):
            # read one line, recording it verbatim in 'hunk'
            l = lr.readline()
            hunk.append(l)
            return l.rstrip('\r\n')

        size = 0
        # scan for the 'literal <size>' or 'delta <size>' header line
        while True:
            line = getline(lr, self.hunk)
            if not line:
                raise PatchError(_('could not extract "%s" binary data')
                                 % self._fname)
            if line.startswith('literal '):
                size = int(line[8:].rstrip())
                break
            if line.startswith('delta '):
                size = int(line[6:].rstrip())
                self.delta = True
                break
        # Decode the base85 body: the first character of each line encodes
        # the decoded byte count (A-Z -> 1..26, a-z -> 27..52), the rest of
        # the line is the base85 data itself.
        dec = []
        line = getline(lr, self.hunk)
        while len(line) > 1:
            l = line[0]
            if l <= 'Z' and l >= 'A':
                l = ord(l) - ord('A') + 1
            else:
                l = ord(l) - ord('a') + 27
            try:
                dec.append(util.b85decode(line[1:])[:l])
            except ValueError as e:
                raise PatchError(_('could not decode "%s" binary patch: %s')
                                 % (self._fname, str(e)))
            line = getline(lr, self.hunk)
        # The concatenated payload is zlib-compressed; its decompressed
        # length must match the size announced in the header line.
        text = zlib.decompress(''.join(dec))
        if len(text) != size:
            raise PatchError(_('"%s" length is %d bytes, should be %d')
                             % (self._fname, len(text), size))
        self.text = text
1465
1465
def parsefilename(str):
    """Extract the filename from a '--- '/'+++ ' patch header line.

    The marker and any trailing newline are stripped; the filename ends at
    the first tab or, failing that, the first space. If neither separator
    is present the whole remainder is the filename.
    """
    # --- filename \t|space stuff
    trimmed = str[4:].rstrip('\r\n')
    for sep in ('\t', ' '):
        cut = trimmed.find(sep)
        if cut >= 0:
            return trimmed[:cut]
    return trimmed
1475
1475
def reversehunks(hunks):
    '''reverse the signs in the hunks given as argument

    This function operates on hunks coming out of patch.filterpatch, that is
    a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:

    >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,7 +1,7 @@
    ... +firstline
    ...  c
    ...  1
    ...  2
    ... + 3
    ... -4
    ...  5
    ...  d
    ... +lastline"""
    >>> hunks = parsepatch([rawpatch])
    >>> hunkscomingfromfilterpatch = []
    >>> for h in hunks:
    ...     hunkscomingfromfilterpatch.append(h)
    ...     hunkscomingfromfilterpatch.extend(h.hunks)

    >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
    >>> from . import util
    >>> fp = util.stringio()
    >>> for c in reversedhunks:
    ...      c.write(fp)
    >>> fp.seek(0) or None
    >>> reversedpatch = fp.read()
    >>> print(pycompat.sysstr(reversedpatch))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -1,4 +1,3 @@
    -firstline
     c
     1
     2
    @@ -2,6 +1,6 @@
     c
     1
     2
    - 3
    +4
     5
     d
    @@ -6,3 +5,2 @@
     5
     d
    -lastline

    '''

    # Headers pass through untouched; anything that knows how to reverse
    # itself is replaced by its reversed counterpart, preserving order.
    return [c.reversehunk() if util.safehasattr(c, 'reversehunk') else c
            for c in hunks]
1538
1538
def parsepatch(originalchunks, maxcontext=None):
    """patch -> [] of headers -> [] of hunks

    If maxcontext is not None, trim context lines if necessary.

    >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
    ... --- a/folder1/g
    ... +++ b/folder1/g
    ... @@ -1,8 +1,10 @@
    ...  1
    ...  2
    ... -3
    ...  4
    ...  5
    ...  6
    ... +6.1
    ... +6.2
    ...  7
    ...  8
    ... +9'''
    >>> out = util.stringio()
    >>> headers = parsepatch([rawpatch], maxcontext=1)
    >>> for header in headers:
    ...     header.write(out)
    ...     for hunk in header.hunks:
    ...         hunk.write(out)
    >>> print(pycompat.sysstr(out.getvalue()))
    diff --git a/folder1/g b/folder1/g
    --- a/folder1/g
    +++ b/folder1/g
    @@ -2,3 +2,2 @@
     2
    -3
     4
    @@ -6,2 +5,4 @@
     6
    +6.1
    +6.2
     7
    @@ -8,1 +9,2 @@
     8
    +9
    """
    class parser(object):
        """patch parsing state machine"""
        def __init__(self):
            # current positions in the old and new file
            self.fromline = 0
            self.toline = 0
            # function/section name carried on the @@ line, if any
            self.proc = ''
            # header currently being filled in
            self.header = None
            # trailing context of the previously flushed hunk
            self.context = []
            # leading context of the hunk being accumulated
            self.before = []
            # changed lines of the hunk being accumulated
            self.hunk = []
            # all completed headers, in input order
            self.headers = []

        def addrange(self, limits):
            # start a new hunk at the positions given by an @@ range line
            fromstart, fromend, tostart, toend, proc = limits
            self.fromline = int(fromstart)
            self.toline = int(tostart)
            self.proc = proc

        def addcontext(self, context):
            # Context lines terminate a hunk: flush the pending one, then
            # remember these lines as leading context for the next hunk.
            if self.hunk:
                h = recordhunk(self.header, self.fromline, self.toline,
                        self.proc, self.before, self.hunk, context, maxcontext)
                self.header.hunks.append(h)
                self.fromline += len(self.before) + h.removed
                self.toline += len(self.before) + h.added
                self.before = []
                self.hunk = []
            self.context = context

        def addhunk(self, hunk):
            # Changed lines begin a hunk body; the context seen just before
            # becomes this hunk's leading context.
            if self.context:
                self.before = self.context
                self.context = []
            self.hunk = hunk

        def newfile(self, hdr):
            # a file header flushes any pending hunk and opens a new header
            self.addcontext([])
            h = header(hdr)
            self.headers.append(h)
            self.header = h

        def addother(self, line):
            pass # 'other' lines are ignored

        def finished(self):
            # flush the last pending hunk and return all parsed headers
            self.addcontext([])
            return self.headers

        # Legal state transitions and the action run for each; any pair of
        # states not listed here is a parse error (see KeyError below).
        transitions = {
            'file': {'context': addcontext,
                     'file': newfile,
                     'hunk': addhunk,
                     'range': addrange},
            'context': {'file': newfile,
                        'hunk': addhunk,
                        'range': addrange,
                        'other': addother},
            'hunk': {'context': addcontext,
                     'file': newfile,
                     'range': addrange},
            'range': {'context': addcontext,
                      'hunk': addhunk},
            'other': {'other': addother},
            }

    p = parser()
    fp = stringio()
    fp.write(''.join(originalchunks))
    fp.seek(0)

    state = 'context'
    # drive the state machine with (state, data) events from scanpatch()
    for newstate, data in scanpatch(fp):
        try:
            p.transitions[state][newstate](p, data)
        except KeyError:
            raise PatchError('unhandled transition: %s -> %s' %
                             (state, newstate))
        state = newstate
    del fp
    return p.finished()
1662
1662
def pathtransform(path, strip, prefix):
    '''turn a path from a patch into a path suitable for the repository

    prefix, if not empty, is expected to be normalized with a / at the end.

    Returns (stripped components, path in repository).

    >>> pathtransform(b'a/b/c', 0, b'')
    ('', 'a/b/c')
    >>> pathtransform(b' a/b/c ', 0, b'')
    ('', ' a/b/c')
    >>> pathtransform(b' a/b/c ', 2, b'')
    ('a/b/', 'c')
    >>> pathtransform(b'a/b/c', 0, b'd/e/')
    ('', 'd/e/a/b/c')
    >>> pathtransform(b' a//b/c ', 2, b'd/e/')
    ('a//b/', 'd/e/c')
    >>> pathtransform(b'a/b/c', 3, b'')
    Traceback (most recent call last):
    PatchError: unable to strip away 1 of 3 dirs from a/b/c
    '''
    if strip == 0:
        # nothing to strip: the whole path goes under the prefix
        return '', prefix + path.rstrip()
    end = len(path)
    pos = 0
    for done in range(strip):
        pos = path.find('/', pos)
        if pos == -1:
            # report how many components were still left to strip
            raise PatchError(_("unable to strip away %d of %d dirs from %s") %
                             (strip - done, strip, path))
        pos += 1
        # consume '//' in the path
        while pos < end - 1 and path[pos:pos + 1] == '/':
            pos += 1
    return path[:pos].lstrip(), prefix + path[pos:].rstrip()
1700
1700
def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
    """Build a patchmeta for a patch carrying no explicit metadata.

    The operation (ADD, DELETE or plain modification) and the file to
    patch are inferred from the a/b filenames, the first hunk, and which
    of the candidate files already exist in 'backend'.

    Raises PatchError when neither a usable source nor destination file
    can be determined.
    """
    nulla = afile_orig == "/dev/null"
    nullb = bfile_orig == "/dev/null"
    # a zero-length hunk at line 0 on the null side marks creation/removal
    create = nulla and hunk.starta == 0 and hunk.lena == 0
    remove = nullb and hunk.startb == 0 and hunk.lenb == 0
    abase, afile = pathtransform(afile_orig, strip, prefix)
    gooda = not nulla and backend.exists(afile)
    bbase, bfile = pathtransform(bfile_orig, strip, prefix)
    if afile == bfile:
        goodb = gooda
    else:
        goodb = not nullb and backend.exists(bfile)
    # 'missing': neither side exists and this is not a creation
    missing = not goodb and not gooda and not create

    # some diff programs apparently produce patches where the afile is
    # not /dev/null, but afile starts with bfile
    abasedir = afile[:afile.rfind('/') + 1]
    bbasedir = bfile[:bfile.rfind('/') + 1]
    if (missing and abasedir == bbasedir and afile.startswith(bfile)
        and hunk.starta == 0 and hunk.lena == 0):
        create = True
        missing = False

    # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
    # diff is between a file and its backup. In this case, the original
    # file should be patched (see original mpatch code).
    isbackup = (abase == bbase and bfile.startswith(afile))
    fname = None
    if not missing:
        if gooda and goodb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif gooda:
            fname = afile

    if not fname:
        # fall back on the non-null side when existence checks failed
        if not nullb:
            if isbackup:
                fname = afile
            else:
                fname = bfile
        elif not nulla:
            fname = afile
        else:
            raise PatchError(_("undefined source and destination files"))

    gp = patchmeta(fname)
    if create:
        gp.op = 'ADD'
    elif remove:
        gp.op = 'DELETE'
    return gp
1755
1755
def scanpatch(fp):
    """like patch.iterhunks, but yield different events

    - ('file', [header_lines + fromfile + tofile])
    - ('context', [context_lines])
    - ('hunk', [hunk_lines])
    - ('range', (-start,len, +start,len, proc))
    """
    lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
    lr = linereader(fp)

    def scanwhile(first, p):
        """scan lr while predicate holds"""
        lines = [first]
        for line in iter(lr.readline, ''):
            if p(line):
                lines.append(line)
            else:
                # push the non-matching line back so the outer loop sees it
                lr.push(line)
                break
        return lines

    for line in iter(lr.readline, ''):
        if line.startswith('diff --git a/') or line.startswith('diff -r '):
            def notheader(line):
                s = line.split(None, 1)
                return not s or s[0] not in ('---', 'diff')
            # collect extended header lines up to the ---/+++ pair or the
            # next diff marker
            header = scanwhile(line, notheader)
            fromfile = lr.readline()
            if fromfile.startswith('---'):
                tofile = lr.readline()
                header += [fromfile, tofile]
            else:
                lr.push(fromfile)
            yield 'file', header
        elif line[0:1] == ' ':
            # unchanged context lines (and '\ No newline ...' markers)
            yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
        elif line[0] in '-+':
            yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
        else:
            m = lines_re.match(line)
            if m:
                yield 'range', m.groups()
            else:
                yield 'other', line
1801
1801
def scangitpatch(lr, firstline):
    """Pre-scan a git patch for its copy/rename metadata.

    A git patch may rename 'a' to 'b' and then change 'b', or copy 'a'
    to 'c' and then change 'c'.  Applied in order, the rename would have
    destroyed 'a' before the later command needs it, and 'b' would
    already be modified.  So we read ahead through the whole stream,
    collect the copy and rename commands, and let the caller perform
    those copies up front.  The input is rewound afterwards so regular
    parsing can resume where it left off.
    """
    start = 0
    try:
        start = lr.fp.tell()
        stream = lr.fp
    except IOError:
        # unseekable input: slurp everything into a rewindable buffer
        stream = stringio(lr.fp.read())
    scanner = linereader(stream)
    scanner.push(firstline)
    gitpatches = readgitpatch(scanner)
    stream.seek(start)
    return gitpatches
1827
1827
def iterhunks(fp):
    """Read a patch and yield the following events:
    - ("file", afile, bfile, firsthunk): select a new target file.
    - ("hunk", hunk): a new hunk is ready to be applied, follows a
    "file" event.
    - ("git", gitchanges): current diff is in git format, gitchanges
    maps filenames to gitpatch records. Unique event.
    """
    afile = ""
    bfile = ""
    state = None
    hunknum = 0
    emitfile = newfile = False
    # pending gitpatch records, in reverse order (popped from the end)
    gitpatches = None

    # our states
    BFILE = 1
    # context is None until we know whether this is a context diff
    # (True) or a unified diff (False)
    context = None
    lr = linereader(fp)

    for x in iter(lr.readline, ''):
        if state == BFILE and (
            (not context and x[0] == '@')
            or (context is not False and x.startswith('***************'))
            or x.startswith('GIT binary patch')):
            # start of a hunk for the currently selected file
            gp = None
            if (gitpatches and
                gitpatches[-1].ispatching(afile, bfile)):
                gp = gitpatches.pop()
            if x.startswith('GIT binary patch'):
                h = binhunk(lr, gp.path)
            else:
                if context is None and x.startswith('***************'):
                    context = True
                h = hunk(x, hunknum + 1, lr, context)
            hunknum += 1
            if emitfile:
                # first hunk of the file: announce the file itself
                emitfile = False
                yield 'file', (afile, bfile, h, gp and gp.copy() or None)
            yield 'hunk', h
        elif x.startswith('diff --git a/'):
            m = gitre.match(x.rstrip(' \r\n'))
            if not m:
                continue
            if gitpatches is None:
                # scan whole input for git metadata
                gitpatches = scangitpatch(lr, x)
                yield 'git', [g.copy() for g in gitpatches
                              if g.op in ('COPY', 'RENAME')]
                gitpatches.reverse()
            afile = 'a/' + m.group(1)
            bfile = 'b/' + m.group(2)
            # flush metadata-only entries preceding the current file
            while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
                gp = gitpatches.pop()
                yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
            if not gitpatches:
                raise PatchError(_('failed to synchronize metadata for "%s"')
                                 % afile[2:])
            gp = gitpatches[-1]
            newfile = True
        elif x.startswith('---'):
            # check for a unified diff
            l2 = lr.readline()
            if not l2.startswith('+++'):
                lr.push(l2)
                continue
            newfile = True
            context = False
            afile = parsefilename(x)
            bfile = parsefilename(l2)
        elif x.startswith('***'):
            # check for a context diff
            l2 = lr.readline()
            if not l2.startswith('---'):
                lr.push(l2)
                continue
            l3 = lr.readline()
            lr.push(l3)
            if not l3.startswith("***************"):
                lr.push(l2)
                continue
            newfile = True
            context = True
            afile = parsefilename(x)
            bfile = parsefilename(l2)

        if newfile:
            newfile = False
            emitfile = True
            state = BFILE
            hunknum = 0

    # emit remaining metadata-only git entries (no content hunks)
    while gitpatches:
        gp = gitpatches.pop()
        yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1923
1923
def applybindelta(binchunk, data):
    """Apply a binary delta hunk
    The algorithm used is the algorithm from git's patch-delta.c

    'binchunk' is the delta stream (headers followed by copy/insert
    opcodes); 'data' is the source the copy opcodes read from.  Returns
    the reconstructed content.  Raises PatchError on the reserved
    opcode 0.
    """
    def deltahead(binchunk):
        # the delta starts with two little-endian base-128 varints
        # (source size, result size); the high bit of each byte marks a
        # continuation.  Return the number of bytes the varint occupies.
        i = 0
        for c in binchunk:
            i += 1
            if not (ord(c) & 0x80):
                return i
        return i
    # skip the source and result size headers; the sizes themselves
    # are not needed to replay the opcodes
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    s = deltahead(binchunk)
    binchunk = binchunk[s:]
    # accumulate fragments in a list and join once at the end: repeated
    # string += is quadratic in the number of opcodes
    pieces = []
    i = 0
    while i < len(binchunk):
        cmd = ord(binchunk[i])
        i += 1
        if (cmd & 0x80):
            # copy-from-source opcode: bits 0-3 select which offset
            # bytes follow, bits 4-6 which size bytes, little-endian
            offset = 0
            size = 0
            if (cmd & 0x01):
                offset = ord(binchunk[i])
                i += 1
            if (cmd & 0x02):
                offset |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x04):
                offset |= ord(binchunk[i]) << 16
                i += 1
            if (cmd & 0x08):
                offset |= ord(binchunk[i]) << 24
                i += 1
            if (cmd & 0x10):
                size = ord(binchunk[i])
                i += 1
            if (cmd & 0x20):
                size |= ord(binchunk[i]) << 8
                i += 1
            if (cmd & 0x40):
                size |= ord(binchunk[i]) << 16
                i += 1
            if size == 0:
                # per patch-delta.c, an encoded size of 0 means 0x10000
                size = 0x10000
            offset_end = offset + size
            pieces.append(data[offset:offset_end])
        elif cmd != 0:
            # literal insert of the next 'cmd' bytes from the delta
            offset_end = i + cmd
            pieces.append(binchunk[i:offset_end])
            i += cmd
        else:
            # opcode 0 is reserved
            raise PatchError(_('unexpected delta opcode 0'))
    return ''.join(pieces)
1979
1979
def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
    """Read a patch from fp and attempt to apply it.

    The return value is 0 when the patch applied cleanly, 1 when it
    applied with fuzz, and -1 when any hunks were rejected.

    With eolmode='strict' the patch content and the patched files are
    handled as raw binary; any other mode ignores line endings while
    patching and normalizes them to 'eolmode' afterwards.
    """
    # delegate to the generic driver using the default patchfile class
    ret = _applydiff(ui, fp, patchfile, backend, store, strip=strip,
                     prefix=prefix, eolmode=eolmode)
    return ret
1992
1992
def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
               eolmode='strict'):
    """Driver behind applydiff(): feed iterhunks() events to 'patcher'.

    Returns 0 on a clean apply, 1 if any hunk needed fuzz, and -1 if
    any hunks were rejected.
    """
    if prefix:
        prefix = pathutil.canonpath(backend.repo.root, backend.repo.getcwd(),
                                    prefix)
        if prefix != '':
            prefix += '/'
    def pstrip(p):
        # strip - 1: git metadata paths do not carry the a/ b/ prefixes
        return pathtransform(p, strip - 1, prefix)[1]

    rejects = 0
    err = 0
    # the patchfile-like object for the file currently being patched
    current_file = None

    for state, values in iterhunks(fp):
        if state == 'hunk':
            if not current_file:
                # file selection failed earlier; skip its hunks
                continue
            ret = current_file.apply(values)
            if ret > 0:
                err = 1
        elif state == 'file':
            if current_file:
                rejects += current_file.close()
                current_file = None
            afile, bfile, first_hunk, gp = values
            if gp:
                gp.path = pstrip(gp.path)
                if gp.oldpath:
                    gp.oldpath = pstrip(gp.oldpath)
            else:
                # non-git patch: infer the metadata from the filenames
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   prefix)
            if gp.op == 'RENAME':
                backend.unlink(gp.oldpath)
            if not first_hunk:
                # metadata-only change (delete, rename/copy, mode flip,
                # or empty-file creation): no content to patch
                if gp.op == 'DELETE':
                    backend.unlink(gp.path)
                    continue
                data, mode = None, None
                if gp.op in ('RENAME', 'COPY'):
                    data, mode = store.getfile(gp.oldpath)[:2]
                    if data is None:
                        # This means that the old path does not exist
                        raise PatchError(_("source file '%s' does not exist")
                                         % gp.oldpath)
                if gp.mode:
                    mode = gp.mode
                if gp.op == 'ADD':
                    # Added files without content have no hunk and
                    # must be created
                    data = ''
                if data or mode:
                    if (gp.op in ('ADD', 'RENAME', 'COPY')
                        and backend.exists(gp.path)):
                        raise PatchError(_("cannot create %s: destination "
                                           "already exists") % gp.path)
                    backend.setfile(gp.path, data, mode, gp.oldpath)
                continue
            try:
                current_file = patcher(ui, gp, backend, store,
                                       eolmode=eolmode)
            except PatchError as inst:
                ui.warn(str(inst) + '\n')
                current_file = None
                rejects += 1
                continue
        elif state == 'git':
            # preload copy/rename sources into the store before any
            # rename removes them from the working copy
            for gp in values:
                path = pstrip(gp.oldpath)
                data, mode = backend.getfile(path)
                if data is None:
                    # The error ignored here will trigger a getfile()
                    # error in a place more appropriate for error
                    # handling, and will not interrupt the patching
                    # process.
                    pass
                else:
                    store.setfile(path, data, mode)
        else:
            raise error.Abort(_('unsupported parser state: %s') % state)

    if current_file:
        rejects += current_file.close()

    if rejects:
        return -1
    return err
2082
2082
def _externalpatch(ui, repo, patcher, patchname, strip, files,
                   similarity):
    """use <patcher> to apply <patchname> to the working directory.
    returns whether patch was applied with fuzz factor.

    'files' is mutated with the set of files the external tool reports
    as patched.  Raises PatchError when the tool exits non-zero.
    """

    fuzz = False
    args = []
    cwd = repo.root
    if cwd:
        args.append('-d %s' % util.shellquote(cwd))
    fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
                                         util.shellquote(patchname)))
    # pf/printed_file track the last "patching file" line seen.  They are
    # initialized here so that a "with fuzz" or "FAILED" line emitted
    # before any "patching file" line cannot hit an unbound name;
    # starting with printed_file=True simply skips printing the (unknown)
    # filename in that pathological case.
    pf = None
    printed_file = True
    try:
        for line in util.iterfile(fp):
            line = line.rstrip()
            ui.note(line + '\n')
            if line.startswith('patching file '):
                pf = util.parsepatchoutput(line)
                printed_file = False
                files.add(pf)
            elif line.find('with fuzz') >= 0:
                fuzz = True
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
            elif line.find('saving rejects to file') >= 0:
                ui.warn(line + '\n')
            elif line.find('FAILED') >= 0:
                if not printed_file:
                    ui.warn(pf + '\n')
                    printed_file = True
                ui.warn(line + '\n')
    finally:
        if files:
            scmutil.marktouched(repo, files, similarity)
    code = fp.close()
    if code:
        raise PatchError(_("patch command failed: %s") %
                         util.explainexit(code)[0])
    return fuzz
2124
2124
def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
                 eolmode='strict'):
    """Apply 'patchobj' (a path or a file-like object) via 'backend'.

    'files' is mutated with the files the backend reports as touched.
    Returns True when the patch applied with fuzz, False when it applied
    cleanly; raises PatchError when any hunks were rejected.
    """
    if files is None:
        files = set()
    if eolmode is None:
        eolmode = ui.config('patch', 'eol')
    if eolmode.lower() not in eolmodes:
        raise error.Abort(_('unsupported line endings type: %s') % eolmode)
    eolmode = eolmode.lower()

    store = filestore()
    try:
        fp = open(patchobj, 'rb')
    except TypeError:
        # not a path: assume it is already an open file-like object
        fp = patchobj
    try:
        ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
                        eolmode=eolmode)
    finally:
        # only close files we opened ourselves; always release the
        # backend and the temporary file store
        if fp != patchobj:
            fp.close()
        files.update(backend.close())
        store.close()
    if ret < 0:
        raise PatchError(_('patch failed to apply'))
    return ret > 0
2151
2151
def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
                  eolmode='strict', similarity=0):
    """Apply <patchobj> to the working directory using the builtin patcher.

    Returns whether the patch was applied with any fuzz factor.
    """
    return patchbackend(ui, workingbackend(ui, repo, similarity), patchobj,
                        strip, prefix, files, eolmode)
2158
2158
def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
              eolmode='strict'):
    """Apply <patchobj> on top of <ctx>, writing results into <store>."""
    return patchbackend(ui, repobackend(ui, repo, ctx, store), patchobj,
                        strip, prefix, files, eolmode)
2163
2163
def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
          similarity=0):
    """Apply <patchname> to the working directory.

    'eolmode' specifies how end of lines should be handled. It can be:
    - 'strict': inputs are read in binary mode, EOLs are preserved
    - 'crlf': EOLs are ignored when patching and reset to CRLF
    - 'lf': EOLs are ignored when patching and reset to LF
    - None: get it from user settings, default to 'strict'
    'eolmode' is ignored when using an external patcher program.

    Returns whether patch was applied with fuzz factor.
    """
    if files is None:
        files = set()
    # an explicitly configured external tool takes precedence over the
    # builtin patcher
    tool = ui.config('ui', 'patch')
    if tool:
        return _externalpatch(ui, repo, tool, patchname, strip,
                              files, similarity)
    return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
                         similarity)
2185
2185
def changedfiles(ui, repo, patchpath, strip=1):
    """Return the set of repository paths touched by the patch at patchpath."""
    backend = fsbackend(ui, repo.root)
    touched = set()
    with open(patchpath, 'rb') as fp:
        for event, values in iterhunks(fp):
            if event in ('hunk', 'git'):
                # content and git pre-scan events carry no new filenames
                continue
            if event != 'file':
                raise error.Abort(_('unsupported parser state: %s') % event)
            afile, bfile, first_hunk, gp = values
            if not gp:
                gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
                                   '')
            else:
                gp.path = pathtransform(gp.path, strip - 1, '')[1]
                if gp.oldpath:
                    gp.oldpath = pathtransform(gp.oldpath, strip - 1, '')[1]
            touched.add(gp.path)
            if gp.op == 'RENAME':
                touched.add(gp.oldpath)
    return touched
2206
2206
class GitDiffRequired(Exception):
    """Signals that the git extended diff format is required.

    NOTE(review): raised/caught by code outside this chunk; exact
    semantics defined by callers -- confirm before relying on them.
    """
    pass
2209
2209
def diffallopts(ui, opts=None, untrusted=False, section='diff'):
    '''Return a diffopts object with every feature group enabled and parsed.'''
    # opt in to all feature groups understood by difffeatureopts
    return difffeatureopts(ui, opts=opts, untrusted=untrusted,
                           section=section, git=True, whitespace=True,
                           formatchanging=True)

# historical alias kept for existing callers
diffopts = diffallopts
2216
2216
def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
                    whitespace=False, formatchanging=False):
    '''return diffopts with only opted-in features parsed

    Features:
    - git: git-style diffs
    - whitespace: whitespace options like ignoreblanklines and ignorews
    - formatchanging: options that will likely break or cause correctness issues
      with most diff parsers
    '''
    def get(key, name=None, getter=ui.configbool, forceplain=None):
        if opts:
            v = opts.get(key)
            # diffopts flags are either None-default (which is passed
            # through unchanged, so we can identify unset values), or
            # some other falsey default (eg --unified, which defaults
            # to an empty string). We only want to override the config
            # entries from hgrc with command line values if they
            # appear to have been set, which is any truthy value,
            # True, or False.
            if v or isinstance(v, bool):
                return v
        if forceplain is not None and ui.plain():
            return forceplain
        return getter(section, name or key, untrusted=untrusted)

    # core options, expected to be understood by every diff parser
    buildopts = {
        'nodates': get('nodates'),
        'showfunc': get('show_function', 'showfunc'),
        'context': get('unified', getter=ui.config),
    }

    if git:
        buildopts['git'] = get('git')

        # since this is in the experimental section, we need to call
        # ui.configbool directly
        buildopts['showsimilarity'] = ui.configbool('experimental',
                                                    'extendedheader.similarity')

        # need to inspect the ui object instead of using get() since we want to
        # test for an int
        hconf = ui.config('experimental', 'extendedheader.index')
        if hconf is not None:
            hlen = None
            try:
                # the hash config could be an integer (for length of hash) or a
                # word (e.g. short, full, none)
                hlen = int(hconf)
                if hlen < 0 or hlen > 40:
                    msg = _("invalid length for extendedheader.index: '%d'\n")
                    ui.warn(msg % hlen)
            except ValueError:
                # default value
                if hconf == 'short' or hconf == '':
                    hlen = 12
                elif hconf == 'full':
                    hlen = 40
                elif hconf != 'none':
                    msg = _("invalid value for extendedheader.index: '%s'\n")
                    ui.warn(msg % hconf)
            finally:
                # always record a value (possibly None) for the index header
                buildopts['index'] = hlen

    if whitespace:
        buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
        buildopts['ignorewsamount'] = get('ignore_space_change',
                                          'ignorewsamount')
        buildopts['ignoreblanklines'] = get('ignore_blank_lines',
                                            'ignoreblanklines')
        buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
    if formatchanging:
        buildopts['text'] = opts and opts.get('text')
        binary = None if opts is None else opts.get('binary')
        buildopts['nobinary'] = (not binary if binary is not None
                                 else get('nobinary', forceplain=False))
        buildopts['noprefix'] = get('noprefix', forceplain=False)

    return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2297
2297
def diff(repo, node1=None, node2=None, match=None, changes=None,
         opts=None, losedatafn=None, prefix='', relroot='', copy=None,
         hunksfilterfn=None):
    '''yields diff of changes to files between two nodes, or node and
    working directory.

    if node1 is None, use first dirstate parent instead.
    if node2 is None, compare node1 with working directory.

    losedatafn(**kwarg) is a callable run when opts.upgrade=True and
    every time some change cannot be represented with the current
    patch format. Return False to upgrade to git patch format, True to
    accept the loss or raise an exception to abort the diff. It is
    called with the name of current file being diffed as 'fn'. If set
    to None, patches will always be upgraded to git format when
    necessary.

    prefix is a filename prefix that is prepended to all filenames on
    display (used for subrepos).

    relroot, if not empty, must be normalized with a trailing /. Any match
    patterns that fall outside it will be ignored.

    copy, if not empty, should contain mappings {dst@y: src@x} of copy
    information.

    hunksfilterfn, if not None, should be a function taking a filectx and
    hunks generator that may yield filtered hunks.
    '''
    for fctx1, fctx2, hdr, hunks in diffhunks(
            repo, node1=node1, node2=node2,
            match=match, changes=changes, opts=opts,
            losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
    ):
        if hunksfilterfn is not None:
            # let the caller drop or rewrite hunks of this file (e.g. to
            # restrict the output to selected line ranges)
            hunks = hunksfilterfn(fctx2, hunks)
        text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
        if hdr and (text or len(hdr) > 1):
            yield '\n'.join(hdr) + '\n'
        if text:
            yield text
2332
2339
def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
              opts=None, losedatafn=None, prefix='', relroot='', copy=None):
    """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
    where `header` is a list of diff headers and `hunks` is an iterable of
    (`hunkrange`, `hunklines`) tuples.

    See diff() for the meaning of parameters.
    """

    if opts is None:
        opts = mdiff.defaultopts

    if not node1 and not node2:
        node1 = repo.dirstate.p1()

    def lrugetfilectx():
        # keep a small LRU cache of filelogs so repeated lookups of the
        # same file do not reopen the revlog
        cache = {}
        order = collections.deque()
        def getfilectx(f, ctx):
            fctx = ctx.filectx(f, filelog=cache.get(f))
            if f not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[f] = fctx.filelog()
            else:
                order.remove(f)
            order.append(f)
            return fctx
        return getfilectx
    getfilectx = lrugetfilectx()

    ctx1 = repo[node1]
    ctx2 = repo[node2]

    relfiltered = False
    if relroot != '' and match.always():
        # as a special case, create a new matcher with just the relroot
        pats = [relroot]
        match = scmutil.match(ctx2, pats, default='path')
        relfiltered = True

    if not changes:
        changes = repo.status(ctx1, ctx2, match=match)
    modified, added, removed = changes[:3]

    if not modified and not added and not removed:
        return []

    if repo.ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]

    if copy is None:
        copy = {}
        if opts.git or opts.upgrade:
            copy = copies.pathcopies(ctx1, ctx2, match=match)

    if relroot is not None:
        if not relfiltered:
            # XXX this would ideally be done in the matcher, but that is
            # generally meant to 'or' patterns, not 'and' them. In this case we
            # need to 'and' all the patterns from the matcher with relroot.
            def filterrel(l):
                return [f for f in l if f.startswith(relroot)]
            modified = filterrel(modified)
            added = filterrel(added)
            removed = filterrel(removed)
            relfiltered = True
        # filter out copies where either side isn't inside the relative root
        copy = dict(((dst, src) for (dst, src) in copy.iteritems()
                     if dst.startswith(relroot)
                     and src.startswith(relroot)))

    modifiedset = set(modified)
    addedset = set(added)
    removedset = set(removed)
    for f in modified:
        if f not in ctx1:
            # Fix up added, since merged-in additions appear as
            # modifications during merges
            modifiedset.remove(f)
            addedset.add(f)
    for f in removed:
        if f not in ctx1:
            # Merged-in additions that are then removed are reported as
            # removed. They are not in ctx1, so we don't want to show them
            # in the diff.
            removedset.remove(f)
    modified = sorted(modifiedset)
    added = sorted(addedset)
    removed = sorted(removedset)
    for dst, src in copy.items():
        if src not in ctx1:
            # Files merged in during a merge and then copied/renamed are
            # reported as copies. We want to show them in the diff as
            # additions.
            del copy[dst]

    def difffn(opts, losedata):
        return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
                       copy, getfilectx, opts, losedata, prefix, relroot)
    if opts.upgrade and not opts.git:
        try:
            def losedata(fn):
                if not losedatafn or not losedatafn(fn=fn):
                    raise GitDiffRequired
            # Buffer the whole output until we are sure it can be generated
            return list(difffn(opts.copy(git=False), losedata))
        except GitDiffRequired:
            return difffn(opts.copy(git=True), None)
    else:
        return difffn(opts, None)
2445
2452
def difflabel(func, *args, **kwargs):
    '''yields 2-tuples of (output, label) based on the output of func()'''
    # labels for extended/header lines of a diff
    headprefixes = [('diff', 'diff.diffline'),
                    ('copy', 'diff.extended'),
                    ('rename', 'diff.extended'),
                    ('old', 'diff.extended'),
                    ('new', 'diff.extended'),
                    ('deleted', 'diff.extended'),
                    ('index', 'diff.extended'),
                    ('similarity', 'diff.extended'),
                    ('---', 'diff.file_a'),
                    ('+++', 'diff.file_b')]
    # labels for hunk content lines
    textprefixes = [('@', 'diff.hunk'),
                    ('-', 'diff.deleted'),
                    ('+', 'diff.inserted')]
    head = False
    for chunk in func(*args, **kwargs):
        lines = chunk.split('\n')
        for i, line in enumerate(lines):
            if i != 0:
                yield ('\n', '')
            # track whether we are inside a file header: a header starts at
            # any line not beginning with a hunk/content character and ends
            # at the first '@' (hunk) line
            if head:
                if line.startswith('@'):
                    head = False
            else:
                if line and line[0] not in ' +-@\\':
                    head = True
            stripline = line
            diffline = False
            if not head and line and line[0] in '+-':
                # highlight tabs and trailing whitespace, but only in
                # changed lines
                stripline = line.rstrip()
                diffline = True

            prefixes = headprefixes if head else textprefixes
            for prefix, label in prefixes:
                if stripline.startswith(prefix):
                    if diffline:
                        # split out tabs so they can get their own label
                        for token in tabsplitter.findall(stripline):
                            if token.startswith('\t'):
                                yield (token, 'diff.tab')
                            else:
                                yield (token, label)
                    else:
                        yield (stripline, label)
                    break
            else:
                yield (line, '')
            if line != stripline:
                yield (line[len(stripline):], 'diff.trailingwhitespace')
2499
2506
def diffui(*args, **kwargs):
    '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
    return difflabel(diff, *args, **kwargs)
2503
2510
2504 def _filepairs(modified, added, removed, copy, opts):
2511 def _filepairs(modified, added, removed, copy, opts):
2505 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2512 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2506 before and f2 is the the name after. For added files, f1 will be None,
2513 before and f2 is the the name after. For added files, f1 will be None,
2507 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2514 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2508 or 'rename' (the latter two only if opts.git is set).'''
2515 or 'rename' (the latter two only if opts.git is set).'''
2509 gone = set()
2516 gone = set()
2510
2517
2511 copyto = dict([(v, k) for k, v in copy.items()])
2518 copyto = dict([(v, k) for k, v in copy.items()])
2512
2519
2513 addedset, removedset = set(added), set(removed)
2520 addedset, removedset = set(added), set(removed)
2514
2521
2515 for f in sorted(modified + added + removed):
2522 for f in sorted(modified + added + removed):
2516 copyop = None
2523 copyop = None
2517 f1, f2 = f, f
2524 f1, f2 = f, f
2518 if f in addedset:
2525 if f in addedset:
2519 f1 = None
2526 f1 = None
2520 if f in copy:
2527 if f in copy:
2521 if opts.git:
2528 if opts.git:
2522 f1 = copy[f]
2529 f1 = copy[f]
2523 if f1 in removedset and f1 not in gone:
2530 if f1 in removedset and f1 not in gone:
2524 copyop = 'rename'
2531 copyop = 'rename'
2525 gone.add(f1)
2532 gone.add(f1)
2526 else:
2533 else:
2527 copyop = 'copy'
2534 copyop = 'copy'
2528 elif f in removedset:
2535 elif f in removedset:
2529 f2 = None
2536 f2 = None
2530 if opts.git:
2537 if opts.git:
2531 # have we already reported a copy above?
2538 # have we already reported a copy above?
2532 if (f in copyto and copyto[f] in addedset
2539 if (f in copyto and copyto[f] in addedset
2533 and copy[copyto[f]] == f):
2540 and copy[copyto[f]] == f):
2534 continue
2541 continue
2535 yield f1, f2, copyop
2542 yield f1, f2, copyop
2536
2543
def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
            copy, getfilectx, opts, losedatafn, prefix, relroot):
    '''given input data, generate a diff and yield it in blocks

    If generating a diff would lose data like flags or binary data and
    losedatafn is not None, it will be called.

    relroot is removed and prefix is added to every path in the diff output.

    If relroot is not empty, this function expects every path in modified,
    added, removed and copy to start with it.'''

    def gitindex(text):
        # blob hash as used in git's "index" extended header lines
        if not text:
            text = ""
        l = len(text)
        s = hashlib.sha1('blob %d\0' % l)
        s.update(text)
        return s.hexdigest()

    if opts.noprefix:
        aprefix = bprefix = ''
    else:
        aprefix = 'a/'
        bprefix = 'b/'

    def diffline(f, revs):
        revinfo = ' '.join(["-r %s" % rev for rev in revs])
        return 'diff %s %s' % (revinfo, f)

    def isempty(fctx):
        return fctx is None or fctx.size() == 0

    date1 = util.datestr(ctx1.date())
    date2 = util.datestr(ctx2.date())

    gitmode = {'l': '120000', 'x': '100755', '': '100644'}

    if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
                          or repo.ui.configbool('devel', 'check-relroot')):
        for f in modified + added + removed + list(copy) + list(copy.values()):
            if f is not None and not f.startswith(relroot):
                raise AssertionError(
                    "file %s doesn't start with relroot %s" % (f, relroot))

    for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
        content1 = None
        content2 = None
        fctx1 = None
        fctx2 = None
        flag1 = None
        flag2 = None
        if f1:
            fctx1 = getfilectx(f1, ctx1)
            if opts.git or losedatafn:
                flag1 = ctx1.flags(f1)
        if f2:
            fctx2 = getfilectx(f2, ctx2)
            if opts.git or losedatafn:
                flag2 = ctx2.flags(f2)
        # if binary is True, output "summary" or "base85", but not "text diff"
        binary = not opts.text and any(f.isbinary()
                                       for f in [fctx1, fctx2]
                                       if f is not None)

        if losedatafn and not opts.git:
            if (binary or
                # copy/rename
                f2 in copy or
                # empty file creation
                (not f1 and isempty(fctx2)) or
                # empty file deletion
                (isempty(fctx1) and not f2) or
                # create with flags
                (not f1 and flag2) or
                # change flags
                (f1 and f2 and flag1 != flag2)):
                losedatafn(f2 or f1)

        path1 = f1 or f2
        path2 = f2 or f1
        path1 = posixpath.join(prefix, path1[len(relroot):])
        path2 = posixpath.join(prefix, path2[len(relroot):])
        header = []
        if opts.git:
            header.append('diff --git %s%s %s%s' %
                          (aprefix, path1, bprefix, path2))
            if not f1: # added
                header.append('new file mode %s' % gitmode[flag2])
            elif not f2: # removed
                header.append('deleted file mode %s' % gitmode[flag1])
            else: # modified/copied/renamed
                mode1, mode2 = gitmode[flag1], gitmode[flag2]
                if mode1 != mode2:
                    header.append('old mode %s' % mode1)
                    header.append('new mode %s' % mode2)
                if copyop is not None:
                    if opts.showsimilarity:
                        sim = similar.score(ctx1[path1], ctx2[path2]) * 100
                        header.append('similarity index %d%%' % sim)
                    header.append('%s from %s' % (copyop, path1))
                    header.append('%s to %s' % (copyop, path2))
        elif revs and not repo.ui.quiet:
            header.append(diffline(path1, revs))

        #  fctx.is  | diffopts                | what to   | is fctx.data()
        #  binary() | text nobinary git index | output?   | outputted?
        # ------------------------------------|----------------------------
        #  yes      | no   no       no  *     | summary   | no
        #  yes      | no   no       yes *     | base85    | yes
        #  yes      | no   yes      no  *     | summary   | no
        #  yes      | no   yes      yes 0     | summary   | no
        #  yes      | no   yes      yes >0    | summary   | semi [1]
        #  yes      | yes  *        *   *     | text diff | yes
        #  no       | *    *        *   *     | text diff | yes
        # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
        if binary and (not opts.git or (opts.git and opts.nobinary and not
                                        opts.index)):
            # fast path: no binary content will be displayed, content1 and
            # content2 are only used for equivalent test. cmp() could have a
            # fast path.
            if fctx1 is not None:
                content1 = b'\0'
            if fctx2 is not None:
                if fctx1 is not None and not fctx1.cmp(fctx2):
                    content2 = b'\0' # not different
                else:
                    content2 = b'\0\0'
        else:
            # normal path: load contents
            if fctx1 is not None:
                content1 = fctx1.data()
            if fctx2 is not None:
                content2 = fctx2.data()

        if binary and opts.git and not opts.nobinary:
            text = mdiff.b85diff(content1, content2)
            if text:
                header.append('index %s..%s' %
                              (gitindex(content1), gitindex(content2)))
            hunks = (None, [text]),
        else:
            if opts.git and opts.index > 0:
                flag = flag1
                if flag is None:
                    flag = flag2
                header.append('index %s..%s %s' %
                              (gitindex(content1)[0:opts.index],
                               gitindex(content2)[0:opts.index],
                               gitmode[flag]))

            uheaders, hunks = mdiff.unidiff(content1, date1,
                                            content2, date2,
                                            path1, path2, opts=opts)
            header.extend(uheaders)
        yield fctx1, fctx2, header, hunks
2692
2699
def diffstatsum(stats):
    """Aggregate per-file diffstat tuples into diff-wide totals.

    ``stats`` is an iterable of ``(filename, adds, removes, isbinary)``
    tuples as produced by diffstatdata().  Returns
    ``(maxfile, maxtotal, addtotal, removetotal, binary)`` where
    ``maxfile`` is the widest filename in display columns, ``maxtotal``
    the largest per-file adds+removes, and ``binary`` is True if any
    entry was binary.
    """
    maxfile = maxtotal = addtotal = removetotal = 0
    binary = False
    for fname, added, removed, isbin in stats:
        namewidth = encoding.colwidth(fname)
        if namewidth > maxfile:
            maxfile = namewidth
        changed = added + removed
        if changed > maxtotal:
            maxtotal = changed
        addtotal += added
        removetotal += removed
        binary = binary or isbin

    return maxfile, maxtotal, addtotal, removetotal, binary
2703
2710
def diffstatdata(lines):
    """Parse a unified diff (an iterable of text lines) into per-file stats.

    Returns a list of ``(filename, adds, removes, isbinary)`` tuples, one
    per file appearing in the diff, in encounter order.
    """
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on Python 3.6+); the compiled pattern is unchanged.
    diffre = re.compile(r'^diff .*-r [a-z0-9]+\s(.*)$')

    results = []
    filename, adds, removes, isbinary = None, 0, 0, False

    def addresult():
        # flush the counters of the file currently being scanned, if any
        if filename:
            results.append((filename, adds, removes, isbinary))

    # inheader is used to track if a line is in the
    # header portion of the diff. This helps properly account
    # for lines that start with '--' or '++'
    inheader = False

    for line in lines:
        if line.startswith('diff'):
            addresult()
            # starting a new file diff
            # set numbers to 0 and reset inheader
            inheader = True
            adds, removes, isbinary = 0, 0, False
            if line.startswith('diff --git a/'):
                filename = gitre.search(line).group(2)
            elif line.startswith('diff -r'):
                # format: "diff -r ... -r ... filename"
                filename = diffre.search(line).group(1)
        elif line.startswith('@@'):
            inheader = False
        elif line.startswith('+') and not inheader:
            adds += 1
        elif line.startswith('-') and not inheader:
            removes += 1
        elif (line.startswith('GIT binary patch') or
              line.startswith('Binary file')):
            isbinary = True
    addresult()
    return results
2742
2749
def diffstat(lines, width=80):
    """Render a classic diffstat summary of diff ``lines`` as one string.

    ``width`` bounds the total width of each row; the +/- histogram is
    scaled down when the counts would not otherwise fit.
    """
    stats = diffstatdata(lines)
    maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)

    countwidth = len(str(maxtotal))
    # the 'Bin' marker needs three columns even for narrow counts
    if hasbinary and countwidth < 3:
        countwidth = 3
    # remaining room for the histogram, never less than 10 columns
    graphwidth = max(width - countwidth - maxname - 6, 10)

    def scale(i):
        if maxtotal <= graphwidth:
            return i
        # If diffstat runs out of room it doesn't print anything,
        # which isn't very useful, so always print at least one + or -
        # if there were at least some changes.
        return max(i * graphwidth // maxtotal, int(bool(i)))

    rows = []
    for fname, added, removed, isbin in stats:
        count = 'Bin' if isbin else '%d' % (added + removed)
        padding = ' ' * (maxname - encoding.colwidth(fname))
        rows.append(' %s%s | %*s %s%s\n'
                    % (fname, padding, countwidth, count,
                       '+' * scale(added), '-' * scale(removed)))

    if stats:
        rows.append(_(' %d files changed, %d insertions(+), '
                      '%d deletions(-)\n')
                    % (len(stats), totaladds, totalremoves))

    return ''.join(rows)
2780
2787
def diffstatui(*args, **kw):
    '''like diffstat(), but yields 2-tuples of (output, label) for
    ui.write()
    '''

    for row in diffstat(*args, **kw).splitlines():
        if not row or row[-1] not in '+-':
            # plain line (summary, binary marker): emit unlabelled
            yield (row, '')
        else:
            # split the trailing +/- histogram off so it can be coloured
            name, graph = row.rsplit(' ', 1)
            yield (name + ' ', '')
            plus = re.search(br'\++', graph)
            if plus:
                yield (plus.group(0), 'diffstat.inserted')
            minus = re.search(br'-+', graph)
            if minus:
                yield (minus.group(0), 'diffstat.deleted')
        yield ('\n', '')
General Comments 0
You need to be logged in to leave comments. Login now