patch: add within-line color diff capacity...
Matthieu Laneuville -
r35278:6ba79cf3 default
@@ -1,3972 +1,3972 @@
# cmdutil.py - help for command processing in mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import itertools
import os
import re
import tempfile

from .i18n import _
from .node import (
    hex,
    nullid,
    nullrev,
    short,
)

from . import (
    bookmarks,
    changelog,
    copies,
    crecord as crecordmod,
    dagop,
    dirstateguard,
    encoding,
    error,
    formatter,
    graphmod,
    match as matchmod,
    mdiff,
    obsolete,
    patch,
    pathutil,
    pycompat,
    registrar,
    revlog,
    revset,
    scmutil,
    smartset,
    templatekw,
    templater,
    util,
    vfs as vfsmod,
)
stringio = util.stringio

# templates of common command options

dryrunopts = [
    ('n', 'dry-run', None,
     _('do not perform actions, just print output')),
]

remoteopts = [
    ('e', 'ssh', '',
     _('specify ssh command to use'), _('CMD')),
    ('', 'remotecmd', '',
     _('specify hg command to run on the remote side'), _('CMD')),
    ('', 'insecure', None,
     _('do not verify server certificate (ignoring web.cacerts config)')),
]

walkopts = [
    ('I', 'include', [],
     _('include names matching the given patterns'), _('PATTERN')),
    ('X', 'exclude', [],
     _('exclude names matching the given patterns'), _('PATTERN')),
]

commitopts = [
    ('m', 'message', '',
     _('use text as commit message'), _('TEXT')),
    ('l', 'logfile', '',
     _('read commit message from file'), _('FILE')),
]

commitopts2 = [
    ('d', 'date', '',
     _('record the specified date as commit date'), _('DATE')),
    ('u', 'user', '',
     _('record the specified user as committer'), _('USER')),
]

# hidden for now
formatteropts = [
    ('T', 'template', '',
     _('display with template (EXPERIMENTAL)'), _('TEMPLATE')),
]

templateopts = [
    ('', 'style', '',
     _('display using template map file (DEPRECATED)'), _('STYLE')),
    ('T', 'template', '',
     _('display with template'), _('TEMPLATE')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '',
     _('limit number of changes displayed'), _('NUM')),
    ('M', 'no-merges', None, _('do not show merges')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('G', 'graph', None, _("show the revision DAG")),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'binary', None, _('generate binary diffs in git mode (default)')),
    ('', 'nodates', None, _('omit dates from diff headers'))
]

diffwsopts = [
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('Z', 'ignore-space-at-eol', None,
     _('ignore changes in whitespace at EOL')),
]

diffopts2 = [
    ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
    ('p', 'show-function', None, _('show which function each change is in')),
    ('', 'reverse', None, _('produce a diff that undoes the changes')),
] + diffwsopts + [
    ('U', 'unified', '',
     _('number of lines of context to show'), _('NUM')),
    ('', 'stat', None, _('output diffstat-style summary of changes')),
    ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
]

mergetoolopts = [
    ('t', 'tool', '', _('specify merge tool')),
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
]

subrepoopts = [
    ('S', 'subrepos', None,
     _('recurse into subrepositories'))
]

debugrevlogopts = [
    ('c', 'changelog', False, _('open changelog')),
    ('m', 'manifest', False, _('open manifest')),
    ('', 'dir', '', _('open directory manifest')),
]
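
# Illustrative sketch (hypothetical command, not part of this file): these
# shared option lists are meant to be concatenated into a command's own
# option table when it is registered, roughly like so:
#
#     cmdtable = {}
#     command = registrar.command(cmdtable)
#
#     @command('mycommand', walkopts + dryrunopts,
#              _('hg mycommand [OPTION]... [FILE]...'))
#     def mycommand(ui, repo, *pats, **opts):
#         pass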

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = "^HG: ------------------------ >8 ------------------------$"

def ishunk(x):
    hunkclasses = (crecordmod.uihunk, patch.recordhunk)
    return isinstance(x, hunkclasses)

def newandmodified(chunks, originalchunks):
    newlyaddedandmodifiedfiles = set()
    for chunk in chunks:
        if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
            originalchunks:
            newlyaddedandmodifiedfiles.add(chunk.header.filename())
    return newlyaddedandmodifiedfiles

def parsealiases(cmd):
    return cmd.lstrip("^").split("|")
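
# Example (illustrative): a command table key such as "^log|history" maps to
# its aliases:
#
#     >>> parsealiases("^log|history")
#     ['log', 'history']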

def setupwrapcolorwrite(ui):
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', '')
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write
    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)
    setattr(ui, 'write', wrap)
    return oldwrite

def filterchunks(ui, originalhunks, usecurses, testfile, operation=None):
    if usecurses:
        if testfile:
            recordfn = crecordmod.testdecorator(testfile,
                                                crecordmod.testchunkselector)
        else:
            recordfn = crecordmod.chunkselector

        return crecordmod.filterpatch(ui, originalhunks, recordfn, operation)

    else:
        return patch.filterpatch(ui, originalhunks, operation)

def recordfilter(ui, originalhunks, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used to build ui messages to indicate to the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config('experimental', 'crecordtest')
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
                                          testfile, operation)
    finally:
        ui.write = oldwrite
    return newchunks, newopts

def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    from . import merge as mergemod
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _('running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _('running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is the generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(_('cannot partially commit a merge '
                                '(use "hg commit" instead)'))

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        force = opts.get('force')
        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        status = repo.status(match=match)
        if not force:
            repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, since we are intending to apply a subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks)
        except error.PatchError as err:
            raise error.Abort(_('error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                        newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get('review', False):
                patchtext = (crecordmod.diffhelptext
                             + crecordmod.patchhelptext
                             + fp.read())
                reviewedpatch = ui.edit(patchtext, "",
                                        action="diff",
                                        repopath=repo.path)
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                # Equivalent to hg.revert
                m = scmutil.matchfiles(repo, backups.keys())
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == 'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this is racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)

class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct children of this directory

    subdirs is a dictionary of sub-directory name as the key and its own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set([])
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not a direct child of this directory, we traverse to the
        directory of which this file is a direct child and add the file
        there.
        """

        # if the filename contains a path separator, it means it's not a direct
        # child of this directory
        if '/' in filename:
            subdir, filep = filename.split('/', 1)

            # does the dirnode object for subdir exist
            if subdir not in self.subdirs:
                subdirpath = os.path.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, os.path.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath)

        2) Otherwise, we do the following:

        a) Yield (status, filepath) for all the files which are in this
        directory (only the ones in this directory, not the subdirs)

        b) Recurse the function on all the subdirectories of this
        directory
        """

        if len(self.statuses) == 1:
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + pycompat.ossep
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath

def tersedir(statuslist, terseargs):
    """
    Terse the status if all the files in a directory share the same status.

    statuslist is scmutil.status() object which contains a list of files for
    each status.
    terseargs is the string which is passed by the user as the argument to
    `--terse` flag.

    The function makes a tree of objects of dirnode class, and at each node it
    stores the information required to know whether we can terse a certain
    directory or not.
    """
    # the order matters here as that is used to produce final list
    allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')

    # checking the argument validity
    for s in pycompat.bytestr(terseargs):
        if s not in allst:
            raise error.Abort(_("'%s' not recognized") % s)

    # creating a dirnode object for the root of the repo
    rootobj = dirnode('')
    pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
               'ignored', 'removed')

    tersedict = {}
    for attrname in pstatus:
        statuschar = attrname[0:1]
        for f in getattr(statuslist, attrname):
            rootobj.addfile(f, statuschar)
        tersedict[statuschar] = []

    # we won't be tersing the root dir, so add files in it
    for st, fpath in rootobj.iterfilepaths():
        tersedict[st].append(fpath)

    # process each sub-directory and build tersedict
    for subdir in rootobj.subdirs.values():
        for st, f in subdir.tersewalk(terseargs):
            tersedict[st].append(f)

    tersedlist = []
    for st in allst:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])

    return tersedlist
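
# Example (illustrative, hypothetical status object): with terseargs 'u' and a
# directory 'newdir/' whose files are all unknown, the whole directory is
# collapsed into a single entry in the 'u' slot of the returned list:
#
#     [[], [], [], [], ['newdir/'], [], []]
#
# instead of one entry per file under 'newdir/'.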

def _commentlines(raw):
    '''Surround lines with a comment char and a new line'''
    lines = raw.splitlines()
    commentedlines = ['# %s' % line for line in lines]
    return '\n'.join(commentedlines) + '\n'
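
# Example (illustrative):
#
#     >>> _commentlines('foo\nbar')
#     '# foo\n# bar\n'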

def _conflictsmsg(repo):
    # avoid merge cycle
    from . import merge as mergemod
    mergestate = mergemod.mergestate.read(repo)
    if not mergestate.active():
        return

    m = scmutil.match(repo[None])
    unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
    if unresolvedlist:
        mergeliststr = '\n'.join(
            [' %s' % util.pathto(repo.root, pycompat.getcwd(), path)
             for path in unresolvedlist])
        msg = _('''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
    else:
        msg = _('No unresolved merge conflicts.')

    return _commentlines(msg)

def _helpmessage(continuecmd, abortcmd):
    msg = _('To continue: %s\n'
            'To abort: %s') % (continuecmd, abortcmd)
    return _commentlines(msg)

def _rebasemsg():
    return _helpmessage('hg rebase --continue', 'hg rebase --abort')

def _histeditmsg():
    return _helpmessage('hg histedit --continue', 'hg histedit --abort')

def _unshelvemsg():
    return _helpmessage('hg unshelve --continue', 'hg unshelve --abort')

def _updatecleanmsg(dest=None):
    warning = _('warning: this will discard uncommitted changes')
    return 'hg update --clean %s (%s)' % (dest or '.', warning)

def _graftmsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg graft --continue', _updatecleanmsg())

def _mergemsg():
    # tweakdefaults requires `update` to have a rev hence the `.`
    return _helpmessage('hg commit', _updatecleanmsg())

def _bisectmsg():
    msg = _('To mark the changeset good: hg bisect --good\n'
            'To mark the changeset bad: hg bisect --bad\n'
            'To abort: hg bisect --reset\n')
    return _commentlines(msg)

def fileexistspredicate(filename):
    return lambda repo: repo.vfs.exists(filename)

def _mergepredicate(repo):
    return len(repo[None].parents()) > 1

STATES = (
    # (state, predicate to detect states, helpful message function)
    ('histedit', fileexistspredicate('histedit-state'), _histeditmsg),
    ('bisect', fileexistspredicate('bisect.state'), _bisectmsg),
    ('graft', fileexistspredicate('graftstate'), _graftmsg),
    ('unshelve', fileexistspredicate('unshelverebasestate'), _unshelvemsg),
    ('rebase', fileexistspredicate('rebasestate'), _rebasemsg),
    # The merge state is part of a list that will be iterated over.
    # They need to be last because some of the other unfinished states may also
    # be in a merge or update state (eg. rebase, histedit, graft, etc).
    # We want those to have priority.
    ('merge', _mergepredicate, _mergemsg),
)

def _getrepostate(repo):
    # experimental config: commands.status.skipstates
    skip = set(repo.ui.configlist('commands', 'status.skipstates'))
    for state, statedetectionpredicate, msgfn in STATES:
        if state in skip:
            continue
        if statedetectionpredicate(repo):
            return (state, statedetectionpredicate, msgfn)

def morestatus(repo, fm):
    statetuple = _getrepostate(repo)
    label = 'status.morestatus'
    if statetuple:
        fm.startitem()
        state, statedetectionpredicate, helpfulmsg = statetuple
        statemsg = _('The repository is in an unfinished *%s* state.') % state
        fm.write('statemsg', '%s\n', _commentlines(statemsg), label=label)
        conmsg = _conflictsmsg(repo)
        if conmsg:
            fm.write('conflictsmsg', '%s\n', conmsg, label=label)
        if helpfulmsg:
            helpmsg = helpfulmsg()
            fm.write('helpmsg', '%s\n', helpmsg, label=label)

def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "^log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for e in keys:
        aliases = parsealiases(e)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            for a in aliases:
                if a.startswith(cmd):
                    found = a
                    break
        if found is not None:
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = (aliases, table[e])
            else:
                choice[found] = (aliases, table[e])

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds

def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        clist = sorted(choice)
        raise error.AmbiguousCommand(cmd, clist)

    if choice:
        return list(choice.values())[0]

    raise error.UnknownCommand(cmd, allcmds)

def findrepo(p):
    while not os.path.isdir(os.path.join(p, ".hg")):
        oldp, p = p, os.path.dirname(p)
        if p == oldp:
            return None

    return p

def bailifchanged(repo, merge=True, hint=None):
    """ enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
    modified, added, removed, deleted = repo.status()[:4]
    if modified or added or removed or deleted:
        raise error.Abort(_('uncommitted changes'), hint=hint)
    ctx = repo[None]
    for s in sorted(ctx.substate):
        ctx.sub(s).bailifchanged(hint=hint)

def logmessage(ui, opts):
    """ get the log message according to -m and -l option """
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise error.Abort(_('options --message and --logfile are mutually '
                            'exclusive'))
    if not message and logfile:
        try:
            if isstdiofilename(logfile):
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise error.Abort(_("can't read commit message '%s': %s") %
                              (logfile, encoding.strtolocal(inst.strerror)))
    return message

def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        if ctxorbool:
            return baseformname + ".merge"
    elif 1 < len(ctxorbool.parents()):
        return baseformname + ".merge"

    return baseformname + ".normal"
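
# Example (illustrative):
#
#     >>> mergeeditform(True, 'commit')
#     'commit.merge'
#     >>> mergeeditform(False, 'commit')
#     'commit.normal'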

def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function to be called with edited commit message
    (= 'description' of the new changeset) just after editing, but
    before checking empty-ness. It should return actual text to be
    stored into history. This allows changing the description before
    storing.

    'extramsg' is an extra message to be shown in the editor instead of
    'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
    is automatically added.

    'editform' is a dot-separated list of names, to distinguish
    the purpose of commit text editing.

    'getcommiteditor' returns 'commitforceeditor' regardless of
    'edit', if one of 'finishdesc' or 'extramsg' is specified, because
    they are specific for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        return lambda r, c, s: commitforceeditor(r, c, s,
                                                 finishdesc=finishdesc,
                                                 extramsg=extramsg,
                                                 editform=editform)
    elif editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    else:
        return commiteditor

def loglimit(opts):
    """get the log limit according to option -l/--limit"""
    limit = opts.get('limit')
    if limit:
        try:
            limit = int(limit)
        except ValueError:
            raise error.Abort(_('limit must be a positive integer'))
        if limit <= 0:
            raise error.Abort(_('limit must be positive'))
    else:
        limit = None
    return limit

def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    node_expander = {
        'H': lambda: hex(node),
        'R': lambda: '%d' % repo.changelog.rev(node),
        'h': lambda: short(node),
        'm': lambda: re.sub('[^\w]', '_', desc or '')
        }
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
        }

    try:
        if node:
            expander.update(node_expander)
        if node:
            expander['r'] = (lambda:
                ('%d' % repo.changelog.rev(node)).zfill(revwidth or 0))
        if total is not None:
            expander['N'] = lambda: '%d' % total
        if seqno is not None:
            expander['n'] = lambda: '%d' % seqno
        if total is not None and seqno is not None:
            expander['n'] = (lambda: ('%d' % seqno).zfill(len('%d' % total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        patlen = len(pat)
        i = 0
        while i < patlen:
            c = pat[i:i + 1]
            if c == '%':
                i += 1
                c = pat[i:i + 1]
                c = expander[c]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise error.Abort(_("invalid format spec '%%%s' in output filename") %
                          inst.args[0])
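
# Example (illustrative, hypothetical node and repo): '%h' expands to the
# short hash of the node and '%n' to the zero-padded sequence number, so a
# call like makefilename(repo, 'export-%n-%h.patch', node, seqno=3, total=10)
# would produce something like 'export-03-f2b5a8e09c8f.patch'.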
867
867
868 def isstdiofilename(pat):
868 def isstdiofilename(pat):
869 """True if the given pat looks like a filename denoting stdin/stdout"""
869 """True if the given pat looks like a filename denoting stdin/stdout"""
870 return not pat or pat == '-'
870 return not pat or pat == '-'
871
871
872 class _unclosablefile(object):
872 class _unclosablefile(object):
873 def __init__(self, fp):
873 def __init__(self, fp):
874 self._fp = fp
874 self._fp = fp
875
875
876 def close(self):
876 def close(self):
877 pass
877 pass
878
878
879 def __iter__(self):
879 def __iter__(self):
880 return iter(self._fp)
880 return iter(self._fp)
881
881
882 def __getattr__(self, attr):
882 def __getattr__(self, attr):
883 return getattr(self._fp, attr)
883 return getattr(self._fp, attr)
884
884
885 def __enter__(self):
885 def __enter__(self):
886 return self
886 return self
887
887
888 def __exit__(self, exc_type, exc_value, exc_tb):
888 def __exit__(self, exc_type, exc_value, exc_tb):
889 pass
889 pass
890
890
891 def makefileobj(repo, pat, node=None, desc=None, total=None,
891 def makefileobj(repo, pat, node=None, desc=None, total=None,
892 seqno=None, revwidth=None, mode='wb', modemap=None,
892 seqno=None, revwidth=None, mode='wb', modemap=None,
893 pathname=None):
893 pathname=None):
894
894
895 writable = mode not in ('r', 'rb')
895 writable = mode not in ('r', 'rb')
896
896
897 if isstdiofilename(pat):
897 if isstdiofilename(pat):
898 if writable:
898 if writable:
899 fp = repo.ui.fout
899 fp = repo.ui.fout
900 else:
900 else:
901 fp = repo.ui.fin
901 fp = repo.ui.fin
902 return _unclosablefile(fp)
902 return _unclosablefile(fp)
903 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
903 fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
904 if modemap is not None:
904 if modemap is not None:
905 mode = modemap.get(fn, mode)
905 mode = modemap.get(fn, mode)
906 if mode == 'wb':
906 if mode == 'wb':
907 modemap[fn] = 'ab'
907 modemap[fn] = 'ab'
908 return open(fn, mode)
908 return open(fn, mode)
909
909
910 def openrevlog(repo, cmd, file_, opts):
910 def openrevlog(repo, cmd, file_, opts):
911 """opens the changelog, manifest, a filelog or a given revlog"""
911 """opens the changelog, manifest, a filelog or a given revlog"""
912 cl = opts['changelog']
912 cl = opts['changelog']
913 mf = opts['manifest']
913 mf = opts['manifest']
914 dir = opts['dir']
914 dir = opts['dir']
915 msg = None
915 msg = None
916 if cl and mf:
916 if cl and mf:
917 msg = _('cannot specify --changelog and --manifest at the same time')
917 msg = _('cannot specify --changelog and --manifest at the same time')
918 elif cl and dir:
918 elif cl and dir:
919 msg = _('cannot specify --changelog and --dir at the same time')
919 msg = _('cannot specify --changelog and --dir at the same time')
920 elif cl or mf or dir:
920 elif cl or mf or dir:
921 if file_:
921 if file_:
922 msg = _('cannot specify filename with --changelog or --manifest')
922 msg = _('cannot specify filename with --changelog or --manifest')
923 elif not repo:
923 elif not repo:
924 msg = _('cannot specify --changelog or --manifest or --dir '
924 msg = _('cannot specify --changelog or --manifest or --dir '
925 'without a repository')
925 'without a repository')
926 if msg:
926 if msg:
927 raise error.Abort(msg)
927 raise error.Abort(msg)
928
928
929 r = None
929 r = None
930 if repo:
930 if repo:
931 if cl:
931 if cl:
932 r = repo.unfiltered().changelog
932 r = repo.unfiltered().changelog
933 elif dir:
933 elif dir:
934 if 'treemanifest' not in repo.requirements:
934 if 'treemanifest' not in repo.requirements:
935 raise error.Abort(_("--dir can only be used on repos with "
935 raise error.Abort(_("--dir can only be used on repos with "
936 "treemanifest enabled"))
936 "treemanifest enabled"))
937 dirlog = repo.manifestlog._revlog.dirlog(dir)
937 dirlog = repo.manifestlog._revlog.dirlog(dir)
938 if len(dirlog):
938 if len(dirlog):
939 r = dirlog
939 r = dirlog
940 elif mf:
940 elif mf:
941 r = repo.manifestlog._revlog
941 r = repo.manifestlog._revlog
942 elif file_:
942 elif file_:
943 filelog = repo.file(file_)
943 filelog = repo.file(file_)
944 if len(filelog):
944 if len(filelog):
945 r = filelog
945 r = filelog
946 if not r:
946 if not r:
947 if not file_:
947 if not file_:
948 raise error.CommandError(cmd, _('invalid arguments'))
948 raise error.CommandError(cmd, _('invalid arguments'))
949 if not os.path.isfile(file_):
949 if not os.path.isfile(file_):
950 raise error.Abort(_("revlog '%s' not found") % file_)
950 raise error.Abort(_("revlog '%s' not found") % file_)
951 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
951 r = revlog.revlog(vfsmod.vfs(pycompat.getcwd(), audit=False),
952 file_[:-2] + ".i")
952 file_[:-2] + ".i")
953 return r
953 return r
954
954
955 def copy(ui, repo, pats, opts, rename=False):
955 def copy(ui, repo, pats, opts, rename=False):
956 # called with the repo lock held
956 # called with the repo lock held
957 #
957 #
958 # hgsep => pathname that uses "/" to separate directories
958 # hgsep => pathname that uses "/" to separate directories
959 # ossep => pathname that uses os.sep to separate directories
959 # ossep => pathname that uses os.sep to separate directories
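# (e.g. 'foo/bar' as hgsep vs. r'foo\bar' as ossep on Windows; the two forms
# coincide on POSIX platforms)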
960 cwd = repo.getcwd()
960 cwd = repo.getcwd()
961 targets = {}
961 targets = {}
962 after = opts.get("after")
962 after = opts.get("after")
963 dryrun = opts.get("dry_run")
963 dryrun = opts.get("dry_run")
964 wctx = repo[None]
964 wctx = repo[None]
965
965
966 def walkpat(pat):
966 def walkpat(pat):
967 srcs = []
967 srcs = []
968 if after:
968 if after:
969 badstates = '?'
969 badstates = '?'
970 else:
970 else:
971 badstates = '?r'
971 badstates = '?r'
972 m = scmutil.match(wctx, [pat], opts, globbed=True)
972 m = scmutil.match(wctx, [pat], opts, globbed=True)
973 for abs in wctx.walk(m):
973 for abs in wctx.walk(m):
974 state = repo.dirstate[abs]
974 state = repo.dirstate[abs]
975 rel = m.rel(abs)
975 rel = m.rel(abs)
976 exact = m.exact(abs)
976 exact = m.exact(abs)
977 if state in badstates:
977 if state in badstates:
978 if exact and state == '?':
978 if exact and state == '?':
979 ui.warn(_('%s: not copying - file is not managed\n') % rel)
979 ui.warn(_('%s: not copying - file is not managed\n') % rel)
980 if exact and state == 'r':
980 if exact and state == 'r':
981 ui.warn(_('%s: not copying - file has been marked for'
981 ui.warn(_('%s: not copying - file has been marked for'
982 ' remove\n') % rel)
982 ' remove\n') % rel)
983 continue
983 continue
984 # abs: hgsep
984 # abs: hgsep
985 # rel: ossep
985 # rel: ossep
986 srcs.append((abs, rel, exact))
986 srcs.append((abs, rel, exact))
987 return srcs
987 return srcs
988
988
989 # abssrc: hgsep
989 # abssrc: hgsep
990 # relsrc: ossep
990 # relsrc: ossep
991 # otarget: ossep
991 # otarget: ossep
992 def copyfile(abssrc, relsrc, otarget, exact):
992 def copyfile(abssrc, relsrc, otarget, exact):
993 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
993 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
994 if '/' in abstarget:
994 if '/' in abstarget:
995 # We cannot normalize abstarget itself, this would prevent
995 # We cannot normalize abstarget itself, this would prevent
996 # case only renames, like a => A.
996 # case only renames, like a => A.
997 abspath, absname = abstarget.rsplit('/', 1)
997 abspath, absname = abstarget.rsplit('/', 1)
998 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
998 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
999 reltarget = repo.pathto(abstarget, cwd)
999 reltarget = repo.pathto(abstarget, cwd)
1000 target = repo.wjoin(abstarget)
1000 target = repo.wjoin(abstarget)
1001 src = repo.wjoin(abssrc)
1001 src = repo.wjoin(abssrc)
1002 state = repo.dirstate[abstarget]
1002 state = repo.dirstate[abstarget]
1003
1003
1004 scmutil.checkportable(ui, abstarget)
1004 scmutil.checkportable(ui, abstarget)
1005
1005
1006 # check for collisions
1006 # check for collisions
1007 prevsrc = targets.get(abstarget)
1007 prevsrc = targets.get(abstarget)
1008 if prevsrc is not None:
1008 if prevsrc is not None:
1009 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1009 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1010 (reltarget, repo.pathto(abssrc, cwd),
1010 (reltarget, repo.pathto(abssrc, cwd),
1011 repo.pathto(prevsrc, cwd)))
1011 repo.pathto(prevsrc, cwd)))
1012 return
1012 return
1013
1013
1014 # check for overwrites
1014 # check for overwrites
1015 exists = os.path.lexists(target)
1015 exists = os.path.lexists(target)
1016 samefile = False
1016 samefile = False
1017 if exists and abssrc != abstarget:
1017 if exists and abssrc != abstarget:
1018 if (repo.dirstate.normalize(abssrc) ==
1018 if (repo.dirstate.normalize(abssrc) ==
1019 repo.dirstate.normalize(abstarget)):
1019 repo.dirstate.normalize(abstarget)):
1020 if not rename:
1020 if not rename:
1021 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1021 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1022 return
1022 return
1023 exists = False
1023 exists = False
1024 samefile = True
1024 samefile = True
1025
1025
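# The next condition parses as (not after and exists) or (after and state in
# 'mn'): refuse to overwrite an existing target, or, with --after, a target
# that is already tracked, unless --force was given.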
1026 if not after and exists or after and state in 'mn':
1026 if not after and exists or after and state in 'mn':
1027 if not opts['force']:
1027 if not opts['force']:
1028 if state in 'mn':
1028 if state in 'mn':
1029 msg = _('%s: not overwriting - file already committed\n')
1029 msg = _('%s: not overwriting - file already committed\n')
1030 if after:
1030 if after:
1031 flags = '--after --force'
1031 flags = '--after --force'
1032 else:
1032 else:
1033 flags = '--force'
1033 flags = '--force'
1034 if rename:
1034 if rename:
1035 hint = _('(hg rename %s to replace the file by '
1035 hint = _('(hg rename %s to replace the file by '
1036 'recording a rename)\n') % flags
1036 'recording a rename)\n') % flags
1037 else:
1037 else:
1038 hint = _('(hg copy %s to replace the file by '
1038 hint = _('(hg copy %s to replace the file by '
1039 'recording a copy)\n') % flags
1039 'recording a copy)\n') % flags
1040 else:
1040 else:
1041 msg = _('%s: not overwriting - file exists\n')
1041 msg = _('%s: not overwriting - file exists\n')
1042 if rename:
1042 if rename:
1043 hint = _('(hg rename --after to record the rename)\n')
1043 hint = _('(hg rename --after to record the rename)\n')
1044 else:
1044 else:
1045 hint = _('(hg copy --after to record the copy)\n')
1045 hint = _('(hg copy --after to record the copy)\n')
1046 ui.warn(msg % reltarget)
1046 ui.warn(msg % reltarget)
1047 ui.warn(hint)
1047 ui.warn(hint)
1048 return
1048 return
1049
1049
1050 if after:
1050 if after:
1051 if not exists:
1051 if not exists:
1052 if rename:
1052 if rename:
1053 ui.warn(_('%s: not recording move - %s does not exist\n') %
1053 ui.warn(_('%s: not recording move - %s does not exist\n') %
1054 (relsrc, reltarget))
1054 (relsrc, reltarget))
1055 else:
1055 else:
1056 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1056 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1057 (relsrc, reltarget))
1057 (relsrc, reltarget))
1058 return
1058 return
1059 elif not dryrun:
1059 elif not dryrun:
1060 try:
1060 try:
1061 if exists:
1061 if exists:
1062 os.unlink(target)
1062 os.unlink(target)
1063 targetdir = os.path.dirname(target) or '.'
1063 targetdir = os.path.dirname(target) or '.'
1064 if not os.path.isdir(targetdir):
1064 if not os.path.isdir(targetdir):
1065 os.makedirs(targetdir)
1065 os.makedirs(targetdir)
1066 if samefile:
1066 if samefile:
1067 tmp = target + "~hgrename"
1067 tmp = target + "~hgrename"
1068 os.rename(src, tmp)
1068 os.rename(src, tmp)
1069 os.rename(tmp, target)
1069 os.rename(tmp, target)
1070 else:
1070 else:
1071 util.copyfile(src, target)
1071 util.copyfile(src, target)
1072 srcexists = True
1072 srcexists = True
1073 except IOError as inst:
1073 except IOError as inst:
1074 if inst.errno == errno.ENOENT:
1074 if inst.errno == errno.ENOENT:
1075 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1075 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1076 srcexists = False
1076 srcexists = False
1077 else:
1077 else:
1078 ui.warn(_('%s: cannot copy - %s\n') %
1078 ui.warn(_('%s: cannot copy - %s\n') %
1079 (relsrc, encoding.strtolocal(inst.strerror)))
1079 (relsrc, encoding.strtolocal(inst.strerror)))
1080 return True # report a failure
1080 return True # report a failure
1081
1081
1082 if ui.verbose or not exact:
1082 if ui.verbose or not exact:
1083 if rename:
1083 if rename:
1084 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1084 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1085 else:
1085 else:
1086 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1086 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1087
1087
1088 targets[abstarget] = abssrc
1088 targets[abstarget] = abssrc
1089
1089
1090 # fix up dirstate
1090 # fix up dirstate
1091 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1091 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1092 dryrun=dryrun, cwd=cwd)
1092 dryrun=dryrun, cwd=cwd)
1093 if rename and not dryrun:
1093 if rename and not dryrun:
1094 if not after and srcexists and not samefile:
1094 if not after and srcexists and not samefile:
1095 repo.wvfs.unlinkpath(abssrc)
1095 repo.wvfs.unlinkpath(abssrc)
1096 wctx.forget([abssrc])
1096 wctx.forget([abssrc])
1097
1097
1098 # pat: ossep
1098 # pat: ossep
1099 # dest: ossep
1099 # dest: ossep
1100 # srcs: list of (hgsep, hgsep, ossep, bool)
1100 # srcs: list of (hgsep, hgsep, ossep, bool)
1101 # return: function that takes hgsep and returns ossep
1101 # return: function that takes hgsep and returns ossep
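# Sketch of the resulting mapping (hypothetical paths): copying directory
# 'src' into an existing directory 'dest' maps 'src/a.c' -> 'dest/src/a.c',
# while copying it to a not-yet-existing 'dest' maps 'src/a.c' -> 'dest/a.c'.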
1102 def targetpathfn(pat, dest, srcs):
1102 def targetpathfn(pat, dest, srcs):
1103 if os.path.isdir(pat):
1103 if os.path.isdir(pat):
1104 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1104 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1105 abspfx = util.localpath(abspfx)
1105 abspfx = util.localpath(abspfx)
1106 if destdirexists:
1106 if destdirexists:
1107 striplen = len(os.path.split(abspfx)[0])
1107 striplen = len(os.path.split(abspfx)[0])
1108 else:
1108 else:
1109 striplen = len(abspfx)
1109 striplen = len(abspfx)
1110 if striplen:
1110 if striplen:
1111 striplen += len(pycompat.ossep)
1111 striplen += len(pycompat.ossep)
1112 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1112 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1113 elif destdirexists:
1113 elif destdirexists:
1114 res = lambda p: os.path.join(dest,
1114 res = lambda p: os.path.join(dest,
1115 os.path.basename(util.localpath(p)))
1115 os.path.basename(util.localpath(p)))
1116 else:
1116 else:
1117 res = lambda p: dest
1117 res = lambda p: dest
1118 return res
1118 return res
1119
1119
1120 # pat: ossep
1120 # pat: ossep
1121 # dest: ossep
1121 # dest: ossep
1122 # srcs: list of (hgsep, hgsep, ossep, bool)
1122 # srcs: list of (hgsep, hgsep, ossep, bool)
1123 # return: function that takes hgsep and returns ossep
1123 # return: function that takes hgsep and returns ossep
1124 def targetpathafterfn(pat, dest, srcs):
1124 def targetpathafterfn(pat, dest, srcs):
1125 if matchmod.patkind(pat):
1125 if matchmod.patkind(pat):
1126 # a mercurial pattern
1126 # a mercurial pattern
1127 res = lambda p: os.path.join(dest,
1127 res = lambda p: os.path.join(dest,
1128 os.path.basename(util.localpath(p)))
1128 os.path.basename(util.localpath(p)))
1129 else:
1129 else:
1130 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1130 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1131 if len(abspfx) < len(srcs[0][0]):
1131 if len(abspfx) < len(srcs[0][0]):
1132 # A directory. Either the target path contains the last
1132 # A directory. Either the target path contains the last
1133 # component of the source path or it does not.
1133 # component of the source path or it does not.
1134 def evalpath(striplen):
1134 def evalpath(striplen):
1135 score = 0
1135 score = 0
1136 for s in srcs:
1136 for s in srcs:
1137 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1137 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1138 if os.path.lexists(t):
1138 if os.path.lexists(t):
1139 score += 1
1139 score += 1
1140 return score
1140 return score
1141
1141
1142 abspfx = util.localpath(abspfx)
1142 abspfx = util.localpath(abspfx)
1143 striplen = len(abspfx)
1143 striplen = len(abspfx)
1144 if striplen:
1144 if striplen:
1145 striplen += len(pycompat.ossep)
1145 striplen += len(pycompat.ossep)
1146 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1146 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1147 score = evalpath(striplen)
1147 score = evalpath(striplen)
1148 striplen1 = len(os.path.split(abspfx)[0])
1148 striplen1 = len(os.path.split(abspfx)[0])
1149 if striplen1:
1149 if striplen1:
1150 striplen1 += len(pycompat.ossep)
1150 striplen1 += len(pycompat.ossep)
1151 if evalpath(striplen1) > score:
1151 if evalpath(striplen1) > score:
1152 striplen = striplen1
1152 striplen = striplen1
1153 res = lambda p: os.path.join(dest,
1153 res = lambda p: os.path.join(dest,
1154 util.localpath(p)[striplen:])
1154 util.localpath(p)[striplen:])
1155 else:
1155 else:
1156 # a file
1156 # a file
1157 if destdirexists:
1157 if destdirexists:
1158 res = lambda p: os.path.join(dest,
1158 res = lambda p: os.path.join(dest,
1159 os.path.basename(util.localpath(p)))
1159 os.path.basename(util.localpath(p)))
1160 else:
1160 else:
1161 res = lambda p: dest
1161 res = lambda p: dest
1162 return res
1162 return res
1163
1163
1164 pats = scmutil.expandpats(pats)
1164 pats = scmutil.expandpats(pats)
1165 if not pats:
1165 if not pats:
1166 raise error.Abort(_('no source or destination specified'))
1166 raise error.Abort(_('no source or destination specified'))
1167 if len(pats) == 1:
1167 if len(pats) == 1:
1168 raise error.Abort(_('no destination specified'))
1168 raise error.Abort(_('no destination specified'))
1169 dest = pats.pop()
1169 dest = pats.pop()
1170 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1170 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1171 if not destdirexists:
1171 if not destdirexists:
1172 if len(pats) > 1 or matchmod.patkind(pats[0]):
1172 if len(pats) > 1 or matchmod.patkind(pats[0]):
1173 raise error.Abort(_('with multiple sources, destination must be an '
1173 raise error.Abort(_('with multiple sources, destination must be an '
1174 'existing directory'))
1174 'existing directory'))
1175 if util.endswithsep(dest):
1175 if util.endswithsep(dest):
1176 raise error.Abort(_('destination %s is not a directory') % dest)
1176 raise error.Abort(_('destination %s is not a directory') % dest)
1177
1177
1178 tfn = targetpathfn
1178 tfn = targetpathfn
1179 if after:
1179 if after:
1180 tfn = targetpathafterfn
1180 tfn = targetpathafterfn
1181 copylist = []
1181 copylist = []
1182 for pat in pats:
1182 for pat in pats:
1183 srcs = walkpat(pat)
1183 srcs = walkpat(pat)
1184 if not srcs:
1184 if not srcs:
1185 continue
1185 continue
1186 copylist.append((tfn(pat, dest, srcs), srcs))
1186 copylist.append((tfn(pat, dest, srcs), srcs))
1187 if not copylist:
1187 if not copylist:
1188 raise error.Abort(_('no files to copy'))
1188 raise error.Abort(_('no files to copy'))
1189
1189
1190 errors = 0
1190 errors = 0
1191 for targetpath, srcs in copylist:
1191 for targetpath, srcs in copylist:
1192 for abssrc, relsrc, exact in srcs:
1192 for abssrc, relsrc, exact in srcs:
1193 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1193 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1194 errors += 1
1194 errors += 1
1195
1195
1196 if errors:
1196 if errors:
1197 ui.warn(_('(consider using --after)\n'))
1197 ui.warn(_('(consider using --after)\n'))
1198
1198
1199 return errors != 0
1199 return errors != 0
1200
1200
1201 ## facility to let extension process additional data into an import patch
1201 ## facility to let extension process additional data into an import patch
1202 # list of identifiers to be executed in order
1202 # list of identifiers to be executed in order
1203 extrapreimport = [] # run before commit
1203 extrapreimport = [] # run before commit
1204 extrapostimport = [] # run after commit
1204 extrapostimport = [] # run after commit
1205 # mapping from identifier to actual import function
1205 # mapping from identifier to actual import function
1206 #
1206 #
1207 # 'preimport' functions are run before the commit is made and are provided the following
1207 # 'preimport' functions are run before the commit is made and are provided the following
1208 # arguments:
1208 # arguments:
1209 # - repo: the localrepository instance,
1209 # - repo: the localrepository instance,
1210 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1210 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1211 # - extra: the future extra dictionary of the changeset, please mutate it,
1211 # - extra: the future extra dictionary of the changeset, please mutate it,
1212 # - opts: the import options.
1212 # - opts: the import options.
1213 # XXX ideally, we would just pass a ctx ready to be computed, that would allow
1213 # XXX ideally, we would just pass a ctx ready to be computed, that would allow
1214 # mutation of the in-memory commit and more. Feel free to rework the code to get
1214 # mutation of the in-memory commit and more. Feel free to rework the code to get
1215 # there.
1215 # there.
1216 extrapreimportmap = {}
1216 extrapreimportmap = {}
1217 # 'postimport' functions are run after the commit is made and are provided the following
1217 # 'postimport' functions are run after the commit is made and are provided the following
1218 # argument:
1218 # argument:
1219 # - ctx: the changectx created by import.
1219 # - ctx: the changectx created by import.
1220 extrapostimportmap = {}
1220 extrapostimportmap = {}
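# Illustrative sketch of how an extension might plug into these hooks (the
# identifier 'myext' and both function names are hypothetical, not part of
# Mercurial):
#
#   def recordsource(repo, patchdata, extra, opts):
#       # stash the original node id (if any) in the new changeset's extra
#       if patchdata.get('nodeid'):
#           extra['source'] = patchdata['nodeid']
#
#   def announce(ctx):
#       ctx.repo().ui.status('imported %s\n' % ctx)
#
#   extrapreimport.append('myext')
#   extrapreimportmap['myext'] = recordsource
#   extrapostimport.append('myext')
#   extrapostimportmap['myext'] = announce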
1221
1221
1222 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1222 def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
1223 """Utility function used by commands.import to import a single patch
1223 """Utility function used by commands.import to import a single patch
1224
1224
1225 This function is explicitly defined here to help the evolve extension to
1225 This function is explicitly defined here to help the evolve extension to
1226 wrap this part of the import logic.
1226 wrap this part of the import logic.
1227
1227
1228 The API is currently a bit ugly because it is a simple code translation from
1228 The API is currently a bit ugly because it is a simple code translation from
1229 the import command. Feel free to make it better.
1229 the import command. Feel free to make it better.
1230
1230
1231 :hunk: a patch (as a binary string)
1231 :hunk: a patch (as a binary string)
1232 :parents: nodes that will be parent of the created commit
1232 :parents: nodes that will be parent of the created commit
1233 :opts: the full dict of options passed to the import command
1233 :opts: the full dict of options passed to the import command
1234 :msgs: list to save commit message to.
1234 :msgs: list to save commit message to.
1235 (used in case we need to save it when failing)
1235 (used in case we need to save it when failing)
1236 :updatefunc: a function that updates a repo to a given node
1236 :updatefunc: a function that updates a repo to a given node
1237 updatefunc(<repo>, <node>)
1237 updatefunc(<repo>, <node>)
1238 """
1238 """
1239 # avoid cycle context -> subrepo -> cmdutil
1239 # avoid cycle context -> subrepo -> cmdutil
1240 from . import context
1240 from . import context
1241 extractdata = patch.extract(ui, hunk)
1241 extractdata = patch.extract(ui, hunk)
1242 tmpname = extractdata.get('filename')
1242 tmpname = extractdata.get('filename')
1243 message = extractdata.get('message')
1243 message = extractdata.get('message')
1244 user = opts.get('user') or extractdata.get('user')
1244 user = opts.get('user') or extractdata.get('user')
1245 date = opts.get('date') or extractdata.get('date')
1245 date = opts.get('date') or extractdata.get('date')
1246 branch = extractdata.get('branch')
1246 branch = extractdata.get('branch')
1247 nodeid = extractdata.get('nodeid')
1247 nodeid = extractdata.get('nodeid')
1248 p1 = extractdata.get('p1')
1248 p1 = extractdata.get('p1')
1249 p2 = extractdata.get('p2')
1249 p2 = extractdata.get('p2')
1250
1250
1251 nocommit = opts.get('no_commit')
1251 nocommit = opts.get('no_commit')
1252 importbranch = opts.get('import_branch')
1252 importbranch = opts.get('import_branch')
1253 update = not opts.get('bypass')
1253 update = not opts.get('bypass')
1254 strip = opts["strip"]
1254 strip = opts["strip"]
1255 prefix = opts["prefix"]
1255 prefix = opts["prefix"]
1256 sim = float(opts.get('similarity') or 0)
1256 sim = float(opts.get('similarity') or 0)
1257 if not tmpname:
1257 if not tmpname:
1258 return (None, None, False)
1258 return (None, None, False)
1259
1259
1260 rejects = False
1260 rejects = False
1261
1261
1262 try:
1262 try:
1263 cmdline_message = logmessage(ui, opts)
1263 cmdline_message = logmessage(ui, opts)
1264 if cmdline_message:
1264 if cmdline_message:
1265 # pick up the cmdline msg
1265 # pick up the cmdline msg
1266 message = cmdline_message
1266 message = cmdline_message
1267 elif message:
1267 elif message:
1268 # pick up the patch msg
1268 # pick up the patch msg
1269 message = message.strip()
1269 message = message.strip()
1270 else:
1270 else:
1271 # launch the editor
1271 # launch the editor
1272 message = None
1272 message = None
1273 ui.debug('message:\n%s\n' % message)
1273 ui.debug('message:\n%s\n' % message)
1274
1274
1275 if len(parents) == 1:
1275 if len(parents) == 1:
1276 parents.append(repo[nullid])
1276 parents.append(repo[nullid])
1277 if opts.get('exact'):
1277 if opts.get('exact'):
1278 if not nodeid or not p1:
1278 if not nodeid or not p1:
1279 raise error.Abort(_('not a Mercurial patch'))
1279 raise error.Abort(_('not a Mercurial patch'))
1280 p1 = repo[p1]
1280 p1 = repo[p1]
1281 p2 = repo[p2 or nullid]
1281 p2 = repo[p2 or nullid]
1282 elif p2:
1282 elif p2:
1283 try:
1283 try:
1284 p1 = repo[p1]
1284 p1 = repo[p1]
1285 p2 = repo[p2]
1285 p2 = repo[p2]
1286 # Without any options, consider p2 only if the
1286 # Without any options, consider p2 only if the
1287 # patch is being applied on top of the recorded
1287 # patch is being applied on top of the recorded
1288 # first parent.
1288 # first parent.
1289 if p1 != parents[0]:
1289 if p1 != parents[0]:
1290 p1 = parents[0]
1290 p1 = parents[0]
1291 p2 = repo[nullid]
1291 p2 = repo[nullid]
1292 except error.RepoError:
1292 except error.RepoError:
1293 p1, p2 = parents
1293 p1, p2 = parents
1294 if p2.node() == nullid:
1294 if p2.node() == nullid:
1295 ui.warn(_("warning: import the patch as a normal revision\n"
1295 ui.warn(_("warning: import the patch as a normal revision\n"
1296 "(use --exact to import the patch as a merge)\n"))
1296 "(use --exact to import the patch as a merge)\n"))
1297 else:
1297 else:
1298 p1, p2 = parents
1298 p1, p2 = parents
1299
1299
1300 n = None
1300 n = None
1301 if update:
1301 if update:
1302 if p1 != parents[0]:
1302 if p1 != parents[0]:
1303 updatefunc(repo, p1.node())
1303 updatefunc(repo, p1.node())
1304 if p2 != parents[1]:
1304 if p2 != parents[1]:
1305 repo.setparents(p1.node(), p2.node())
1305 repo.setparents(p1.node(), p2.node())
1306
1306
1307 if opts.get('exact') or importbranch:
1307 if opts.get('exact') or importbranch:
1308 repo.dirstate.setbranch(branch or 'default')
1308 repo.dirstate.setbranch(branch or 'default')
1309
1309
1310 partial = opts.get('partial', False)
1310 partial = opts.get('partial', False)
1311 files = set()
1311 files = set()
1312 try:
1312 try:
1313 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1313 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1314 files=files, eolmode=None, similarity=sim / 100.0)
1314 files=files, eolmode=None, similarity=sim / 100.0)
1315 except error.PatchError as e:
1315 except error.PatchError as e:
1316 if not partial:
1316 if not partial:
1317 raise error.Abort(str(e))
1317 raise error.Abort(str(e))
1318 if partial:
1318 if partial:
1319 rejects = True
1319 rejects = True
1320
1320
1321 files = list(files)
1321 files = list(files)
1322 if nocommit:
1322 if nocommit:
1323 if message:
1323 if message:
1324 msgs.append(message)
1324 msgs.append(message)
1325 else:
1325 else:
1326 if opts.get('exact') or p2:
1326 if opts.get('exact') or p2:
1327 # If you got here, you either use --force and know what
1327 # If you got here, you either use --force and know what
1328 # you are doing or used --exact or a merge patch while
1328 # you are doing or used --exact or a merge patch while
1329 # being updated to its first parent.
1329 # being updated to its first parent.
1330 m = None
1330 m = None
1331 else:
1331 else:
1332 m = scmutil.matchfiles(repo, files or [])
1332 m = scmutil.matchfiles(repo, files or [])
1333 editform = mergeeditform(repo[None], 'import.normal')
1333 editform = mergeeditform(repo[None], 'import.normal')
1334 if opts.get('exact'):
1334 if opts.get('exact'):
1335 editor = None
1335 editor = None
1336 else:
1336 else:
1337 editor = getcommiteditor(editform=editform, **opts)
1337 editor = getcommiteditor(editform=editform, **opts)
1338 extra = {}
1338 extra = {}
1339 for idfunc in extrapreimport:
1339 for idfunc in extrapreimport:
1340 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1340 extrapreimportmap[idfunc](repo, extractdata, extra, opts)
1341 overrides = {}
1341 overrides = {}
1342 if partial:
1342 if partial:
1343 overrides[('ui', 'allowemptycommit')] = True
1343 overrides[('ui', 'allowemptycommit')] = True
1344 with repo.ui.configoverride(overrides, 'import'):
1344 with repo.ui.configoverride(overrides, 'import'):
1345 n = repo.commit(message, user,
1345 n = repo.commit(message, user,
1346 date, match=m,
1346 date, match=m,
1347 editor=editor, extra=extra)
1347 editor=editor, extra=extra)
1348 for idfunc in extrapostimport:
1348 for idfunc in extrapostimport:
1349 extrapostimportmap[idfunc](repo[n])
1349 extrapostimportmap[idfunc](repo[n])
1350 else:
1350 else:
1351 if opts.get('exact') or importbranch:
1351 if opts.get('exact') or importbranch:
1352 branch = branch or 'default'
1352 branch = branch or 'default'
1353 else:
1353 else:
1354 branch = p1.branch()
1354 branch = p1.branch()
1355 store = patch.filestore()
1355 store = patch.filestore()
1356 try:
1356 try:
1357 files = set()
1357 files = set()
1358 try:
1358 try:
1359 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1359 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1360 files, eolmode=None)
1360 files, eolmode=None)
1361 except error.PatchError as e:
1361 except error.PatchError as e:
1362 raise error.Abort(str(e))
1362 raise error.Abort(str(e))
1363 if opts.get('exact'):
1363 if opts.get('exact'):
1364 editor = None
1364 editor = None
1365 else:
1365 else:
1366 editor = getcommiteditor(editform='import.bypass')
1366 editor = getcommiteditor(editform='import.bypass')
1367 memctx = context.memctx(repo, (p1.node(), p2.node()),
1367 memctx = context.memctx(repo, (p1.node(), p2.node()),
1368 message,
1368 message,
1369 files=files,
1369 files=files,
1370 filectxfn=store,
1370 filectxfn=store,
1371 user=user,
1371 user=user,
1372 date=date,
1372 date=date,
1373 branch=branch,
1373 branch=branch,
1374 editor=editor)
1374 editor=editor)
1375 n = memctx.commit()
1375 n = memctx.commit()
1376 finally:
1376 finally:
1377 store.close()
1377 store.close()
1378 if opts.get('exact') and nocommit:
1378 if opts.get('exact') and nocommit:
1379 # --exact with --no-commit is still useful in that it does merge
1379 # --exact with --no-commit is still useful in that it does merge
1380 # and branch bits
1380 # and branch bits
1381 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1381 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1382 elif opts.get('exact') and hex(n) != nodeid:
1382 elif opts.get('exact') and hex(n) != nodeid:
1383 raise error.Abort(_('patch is damaged or loses information'))
1383 raise error.Abort(_('patch is damaged or loses information'))
1384 msg = _('applied to working directory')
1384 msg = _('applied to working directory')
1385 if n:
1385 if n:
1386 # i18n: refers to a short changeset id
1386 # i18n: refers to a short changeset id
1387 msg = _('created %s') % short(n)
1387 msg = _('created %s') % short(n)
1388 return (msg, n, rejects)
1388 return (msg, n, rejects)
1389 finally:
1389 finally:
1390 os.unlink(tmpname)
1390 os.unlink(tmpname)
1391
1391
1392 # facility to let extensions include additional data in an exported patch
1392 # facility to let extensions include additional data in an exported patch
1393 # list of identifiers to be executed in order
1393 # list of identifiers to be executed in order
1394 extraexport = []
1394 extraexport = []
1395 # mapping from identifier to actual export function
1395 # mapping from identifier to actual export function
1396 # function has to return a string to be added to the header or None
1396 # function has to return a string to be added to the header or None
1397 # it is given two arguments (sequencenumber, changectx)
1397 # it is given two arguments (sequencenumber, changectx)
1398 extraexportmap = {}
1398 extraexportmap = {}
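# Illustrative sketch (the identifier 'myext' and the function are
# hypothetical): a registered function receives (seqno, ctx) and returns the
# header text without the leading '# ', or None to add nothing.
#
#   def topicheader(seqno, ctx):
#       topic = ctx.extra().get('topic')
#       return ('Topic %s' % topic) if topic else None
#
#   extraexport.append('myext')
#   extraexportmap['myext'] = topicheader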
1399
1399
1400 def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
1400 def _exportsingle(repo, ctx, match, switch_parent, rev, seqno, write, diffopts):
1401 node = scmutil.binnode(ctx)
1401 node = scmutil.binnode(ctx)
1402 parents = [p.node() for p in ctx.parents() if p]
1402 parents = [p.node() for p in ctx.parents() if p]
1403 branch = ctx.branch()
1403 branch = ctx.branch()
1404 if switch_parent:
1404 if switch_parent:
1405 parents.reverse()
1405 parents.reverse()
1406
1406
1407 if parents:
1407 if parents:
1408 prev = parents[0]
1408 prev = parents[0]
1409 else:
1409 else:
1410 prev = nullid
1410 prev = nullid
1411
1411
1412 write("# HG changeset patch\n")
1412 write("# HG changeset patch\n")
1413 write("# User %s\n" % ctx.user())
1413 write("# User %s\n" % ctx.user())
1414 write("# Date %d %d\n" % ctx.date())
1414 write("# Date %d %d\n" % ctx.date())
1415 write("# %s\n" % util.datestr(ctx.date()))
1415 write("# %s\n" % util.datestr(ctx.date()))
1416 if branch and branch != 'default':
1416 if branch and branch != 'default':
1417 write("# Branch %s\n" % branch)
1417 write("# Branch %s\n" % branch)
1418 write("# Node ID %s\n" % hex(node))
1418 write("# Node ID %s\n" % hex(node))
1419 write("# Parent %s\n" % hex(prev))
1419 write("# Parent %s\n" % hex(prev))
1420 if len(parents) > 1:
1420 if len(parents) > 1:
1421 write("# Parent %s\n" % hex(parents[1]))
1421 write("# Parent %s\n" % hex(parents[1]))
1422
1422
1423 for headerid in extraexport:
1423 for headerid in extraexport:
1424 header = extraexportmap[headerid](seqno, ctx)
1424 header = extraexportmap[headerid](seqno, ctx)
1425 if header is not None:
1425 if header is not None:
1426 write('# %s\n' % header)
1426 write('# %s\n' % header)
1427 write(ctx.description().rstrip())
1427 write(ctx.description().rstrip())
1428 write("\n\n")
1428 write("\n\n")
1429
1429
1430 for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
1430 for chunk, label in patch.diffui(repo, prev, node, match, opts=diffopts):
1431 write(chunk, label=label)
1431 write(chunk, label=label)
1432
1432
1433 def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
1433 def export(repo, revs, fntemplate='hg-%h.patch', fp=None, switch_parent=False,
1434 opts=None, match=None):
1434 opts=None, match=None):
1435 '''export changesets as hg patches
1435 '''export changesets as hg patches
1436
1436
1437 Args:
1437 Args:
1438 repo: The repository from which we're exporting revisions.
1438 repo: The repository from which we're exporting revisions.
1439 revs: A list of revisions to export as revision numbers.
1439 revs: A list of revisions to export as revision numbers.
1440 fntemplate: An optional string to use for generating patch file names.
1440 fntemplate: An optional string to use for generating patch file names.
1441 fp: An optional file-like object to which patches should be written.
1441 fp: An optional file-like object to which patches should be written.
1442 switch_parent: If True, show diffs against second parent when not nullid.
1442 switch_parent: If True, show diffs against second parent when not nullid.
1443 Default is false, which always shows diff against p1.
1443 Default is false, which always shows diff against p1.
1444 opts: diff options to use for generating the patch.
1444 opts: diff options to use for generating the patch.
1445 match: If specified, only export changes to files matching this matcher.
1445 match: If specified, only export changes to files matching this matcher.
1446
1446
1447 Returns:
1447 Returns:
1448 Nothing.
1448 Nothing.
1449
1449
1450 Side Effect:
1450 Side Effect:
1451 "HG Changeset Patch" data is emitted to one of the following
1451 "HG Changeset Patch" data is emitted to one of the following
1452 destinations:
1452 destinations:
1453 fp is specified: All revs are written to the specified
1453 fp is specified: All revs are written to the specified
1454 file-like object.
1454 file-like object.
1455 fntemplate specified: Each rev is written to a unique file named using
1455 fntemplate specified: Each rev is written to a unique file named using
1456 the given template.
1456 the given template.
1457 Neither fp nor template specified: All revs written to repo.ui.write()
1457 Neither fp nor template specified: All revs written to repo.ui.write()
1458 '''
1458 '''
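# A minimal usage sketch (assuming revs are revision numbers, as documented
# above):
#
#   cmdutil.export(repo, revs, fntemplate='')     # emit all patches via ui.write()
#   cmdutil.export(repo, revs)                    # default 'hg-%h.patch', one file per rev
#   with open('all.patch', 'wb') as fp:
#       cmdutil.export(repo, revs, fp=fp)         # everything into one file object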
1459
1459
1460 total = len(revs)
1460 total = len(revs)
1461 revwidth = max(len(str(rev)) for rev in revs)
1461 revwidth = max(len(str(rev)) for rev in revs)
1462 filemode = {}
1462 filemode = {}
1463
1463
1464 write = None
1464 write = None
1465 dest = '<unnamed>'
1465 dest = '<unnamed>'
1466 if fp:
1466 if fp:
1467 dest = getattr(fp, 'name', dest)
1467 dest = getattr(fp, 'name', dest)
1468 def write(s, **kw):
1468 def write(s, **kw):
1469 fp.write(s)
1469 fp.write(s)
1470 elif not fntemplate:
1470 elif not fntemplate:
1471 write = repo.ui.write
1471 write = repo.ui.write
1472
1472
1473 for seqno, rev in enumerate(revs, 1):
1473 for seqno, rev in enumerate(revs, 1):
1474 ctx = repo[rev]
1474 ctx = repo[rev]
1475 fo = None
1475 fo = None
1476 if not fp and fntemplate:
1476 if not fp and fntemplate:
1477 desc_lines = ctx.description().rstrip().split('\n')
1477 desc_lines = ctx.description().rstrip().split('\n')
1478 desc = desc_lines[0] # Commit always has a first line.
1478 desc = desc_lines[0] # Commit always has a first line.
1479 fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
1479 fo = makefileobj(repo, fntemplate, ctx.node(), desc=desc,
1480 total=total, seqno=seqno, revwidth=revwidth,
1480 total=total, seqno=seqno, revwidth=revwidth,
1481 mode='wb', modemap=filemode)
1481 mode='wb', modemap=filemode)
1482 dest = fo.name
1482 dest = fo.name
1483 def write(s, **kw):
1483 def write(s, **kw):
1484 fo.write(s)
1484 fo.write(s)
1485 if not dest.startswith('<'):
1485 if not dest.startswith('<'):
1486 repo.ui.note("%s\n" % dest)
1486 repo.ui.note("%s\n" % dest)
1487 _exportsingle(
1487 _exportsingle(
1488 repo, ctx, match, switch_parent, rev, seqno, write, opts)
1488 repo, ctx, match, switch_parent, rev, seqno, write, opts)
1489 if fo is not None:
1489 if fo is not None:
1490 fo.close()
1490 fo.close()
1491
1491
1492 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1492 def diffordiffstat(ui, repo, diffopts, node1, node2, match,
1493 changes=None, stat=False, fp=None, prefix='',
1493 changes=None, stat=False, fp=None, prefix='',
1494 root='', listsubrepos=False, hunksfilterfn=None):
1494 root='', listsubrepos=False, hunksfilterfn=None):
1495 '''show diff or diffstat.'''
1495 '''show diff or diffstat.'''
1496 if fp is None:
1496 if fp is None:
1497 write = ui.write
1497 write = ui.write
1498 else:
1498 else:
1499 def write(s, **kw):
1499 def write(s, **kw):
1500 fp.write(s)
1500 fp.write(s)
1501
1501
1502 if root:
1502 if root:
1503 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1503 relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
1504 else:
1504 else:
1505 relroot = ''
1505 relroot = ''
1506 if relroot != '':
1506 if relroot != '':
1507 # XXX relative roots currently don't work if the root is within a
1507 # XXX relative roots currently don't work if the root is within a
1508 # subrepo
1508 # subrepo
1509 uirelroot = match.uipath(relroot)
1509 uirelroot = match.uipath(relroot)
1510 relroot += '/'
1510 relroot += '/'
1511 for matchroot in match.files():
1511 for matchroot in match.files():
1512 if not matchroot.startswith(relroot):
1512 if not matchroot.startswith(relroot):
1513 ui.warn(_('warning: %s not inside relative root %s\n') % (
1513 ui.warn(_('warning: %s not inside relative root %s\n') % (
1514 match.uipath(matchroot), uirelroot))
1514 match.uipath(matchroot), uirelroot))
1515
1515
1516 if stat:
1516 if stat:
1517 diffopts = diffopts.copy(context=0)
1517 diffopts = diffopts.copy(context=0)
1518 width = 80
1518 width = 80
1519 if not ui.plain():
1519 if not ui.plain():
1520 width = ui.termwidth()
1520 width = ui.termwidth()
1521 chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
1521 chunks = patch.diff(repo, node1, node2, match, changes, opts=diffopts,
1522 prefix=prefix, relroot=relroot,
1522 prefix=prefix, relroot=relroot,
1523 hunksfilterfn=hunksfilterfn)
1523 hunksfilterfn=hunksfilterfn)
1524 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1524 for chunk, label in patch.diffstatui(util.iterlines(chunks),
1525 width=width):
1525 width=width):
1526 write(chunk, label=label)
1526 write(chunk, label=label)
1527 else:
1527 else:
1528 for chunk, label in patch.diffui(repo, node1, node2, match,
1528 for chunk, label in patch.diffui(repo, node1, node2, match,
1529 changes, diffopts, prefix=prefix,
1529 changes, opts=diffopts, prefix=prefix,
1530 relroot=relroot,
1530 relroot=relroot,
1531 hunksfilterfn=hunksfilterfn):
1531 hunksfilterfn=hunksfilterfn):
1532 write(chunk, label=label)
1532 write(chunk, label=label)
1533
1533
1534 if listsubrepos:
1534 if listsubrepos:
1535 ctx1 = repo[node1]
1535 ctx1 = repo[node1]
1536 ctx2 = repo[node2]
1536 ctx2 = repo[node2]
1537 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1537 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
1538 tempnode2 = node2
1538 tempnode2 = node2
1539 try:
1539 try:
1540 if node2 is not None:
1540 if node2 is not None:
1541 tempnode2 = ctx2.substate[subpath][1]
1541 tempnode2 = ctx2.substate[subpath][1]
1542 except KeyError:
1542 except KeyError:
1543 # A subrepo that existed in node1 was deleted between node1 and
1543 # A subrepo that existed in node1 was deleted between node1 and
1544 # node2 (inclusive). Thus, ctx2's substate won't contain that
1544 # node2 (inclusive). Thus, ctx2's substate won't contain that
1545 # subpath. The best we can do is to ignore it.
1545 # subpath. The best we can do is to ignore it.
1546 tempnode2 = None
1546 tempnode2 = None
1547 submatch = matchmod.subdirmatcher(subpath, match)
1547 submatch = matchmod.subdirmatcher(subpath, match)
1548 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1548 sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
1549 stat=stat, fp=fp, prefix=prefix)
1549 stat=stat, fp=fp, prefix=prefix)
1550
1550
1551 def _changesetlabels(ctx):
1551 def _changesetlabels(ctx):
1552 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1552 labels = ['log.changeset', 'changeset.%s' % ctx.phasestr()]
1553 if ctx.obsolete():
1553 if ctx.obsolete():
1554 labels.append('changeset.obsolete')
1554 labels.append('changeset.obsolete')
1555 if ctx.isunstable():
1555 if ctx.isunstable():
1556 labels.append('changeset.unstable')
1556 labels.append('changeset.unstable')
1557 for instability in ctx.instabilities():
1557 for instability in ctx.instabilities():
1558 labels.append('instability.%s' % instability)
1558 labels.append('instability.%s' % instability)
1559 return ' '.join(labels)
1559 return ' '.join(labels)
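# For example, a draft changeset with no instabilities yields the label string
# 'log.changeset changeset.draft'; an obsolete one additionally carries
# 'changeset.obsolete'. These are the names the label= arguments below (and the
# color machinery) key off.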
1560
1560
1561 class changeset_printer(object):
1561 class changeset_printer(object):
1562 '''show changeset information when templating not requested.'''
1562 '''show changeset information when templating not requested.'''
1563
1563
1564 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1564 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1565 self.ui = ui
1565 self.ui = ui
1566 self.repo = repo
1566 self.repo = repo
1567 self.buffered = buffered
1567 self.buffered = buffered
1568 self.matchfn = matchfn
1568 self.matchfn = matchfn
1569 self.diffopts = diffopts
1569 self.diffopts = diffopts
1570 self.header = {}
1570 self.header = {}
1571 self.hunk = {}
1571 self.hunk = {}
1572 self.lastheader = None
1572 self.lastheader = None
1573 self.footer = None
1573 self.footer = None
1574 self._columns = templatekw.getlogcolumns()
1574 self._columns = templatekw.getlogcolumns()
1575
1575
1576 def flush(self, ctx):
1576 def flush(self, ctx):
1577 rev = ctx.rev()
1577 rev = ctx.rev()
1578 if rev in self.header:
1578 if rev in self.header:
1579 h = self.header[rev]
1579 h = self.header[rev]
1580 if h != self.lastheader:
1580 if h != self.lastheader:
1581 self.lastheader = h
1581 self.lastheader = h
1582 self.ui.write(h)
1582 self.ui.write(h)
1583 del self.header[rev]
1583 del self.header[rev]
1584 if rev in self.hunk:
1584 if rev in self.hunk:
1585 self.ui.write(self.hunk[rev])
1585 self.ui.write(self.hunk[rev])
1586 del self.hunk[rev]
1586 del self.hunk[rev]
1587 return 1
1587 return 1
1588 return 0
1588 return 0
1589
1589
1590 def close(self):
1590 def close(self):
1591 if self.footer:
1591 if self.footer:
1592 self.ui.write(self.footer)
1592 self.ui.write(self.footer)
1593
1593
1594 def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
1594 def show(self, ctx, copies=None, matchfn=None, hunksfilterfn=None,
1595 **props):
1595 **props):
1596 props = pycompat.byteskwargs(props)
1596 props = pycompat.byteskwargs(props)
1597 if self.buffered:
1597 if self.buffered:
1598 self.ui.pushbuffer(labeled=True)
1598 self.ui.pushbuffer(labeled=True)
1599 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1599 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1600 self.hunk[ctx.rev()] = self.ui.popbuffer()
1600 self.hunk[ctx.rev()] = self.ui.popbuffer()
1601 else:
1601 else:
1602 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1602 self._show(ctx, copies, matchfn, hunksfilterfn, props)
1603
1603
1604 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1604 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1605 '''show a single changeset or file revision'''
1605 '''show a single changeset or file revision'''
1606 changenode = ctx.node()
1606 changenode = ctx.node()
1607 rev = ctx.rev()
1607 rev = ctx.rev()
1608
1608
1609 if self.ui.quiet:
1609 if self.ui.quiet:
1610 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
1610 self.ui.write("%s\n" % scmutil.formatchangeid(ctx),
1611 label='log.node')
1611 label='log.node')
1612 return
1612 return
1613
1613
1614 columns = self._columns
1614 columns = self._columns
1615 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
1615 self.ui.write(columns['changeset'] % scmutil.formatchangeid(ctx),
1616 label=_changesetlabels(ctx))
1616 label=_changesetlabels(ctx))
1617
1617
1618 # branches are shown first before any other names due to backwards
1618 # branches are shown first before any other names due to backwards
1619 # compatibility
1619 # compatibility
1620 branch = ctx.branch()
1620 branch = ctx.branch()
1621 # don't show the default branch name
1621 # don't show the default branch name
1622 if branch != 'default':
1622 if branch != 'default':
1623 self.ui.write(columns['branch'] % branch, label='log.branch')
1623 self.ui.write(columns['branch'] % branch, label='log.branch')
1624
1624
1625 for nsname, ns in self.repo.names.iteritems():
1625 for nsname, ns in self.repo.names.iteritems():
1626 # branches has special logic already handled above, so here we just
1626 # branches has special logic already handled above, so here we just
1627 # skip it
1627 # skip it
1628 if nsname == 'branches':
1628 if nsname == 'branches':
1629 continue
1629 continue
1630 # we will use the templatename as the color name since those two
1630 # we will use the templatename as the color name since those two
1631 # should be the same
1631 # should be the same
1632 for name in ns.names(self.repo, changenode):
1632 for name in ns.names(self.repo, changenode):
1633 self.ui.write(ns.logfmt % name,
1633 self.ui.write(ns.logfmt % name,
1634 label='log.%s' % ns.colorname)
1634 label='log.%s' % ns.colorname)
1635 if self.ui.debugflag:
1635 if self.ui.debugflag:
1636 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
1636 self.ui.write(columns['phase'] % ctx.phasestr(), label='log.phase')
1637 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1637 for pctx in scmutil.meaningfulparents(self.repo, ctx):
1638 label = 'log.parent changeset.%s' % pctx.phasestr()
1638 label = 'log.parent changeset.%s' % pctx.phasestr()
1639 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
1639 self.ui.write(columns['parent'] % scmutil.formatchangeid(pctx),
1640 label=label)
1640 label=label)
1641
1641
1642 if self.ui.debugflag and rev is not None:
1642 if self.ui.debugflag and rev is not None:
1643 mnode = ctx.manifestnode()
1643 mnode = ctx.manifestnode()
1644 mrev = self.repo.manifestlog._revlog.rev(mnode)
1644 mrev = self.repo.manifestlog._revlog.rev(mnode)
1645 self.ui.write(columns['manifest']
1645 self.ui.write(columns['manifest']
1646 % scmutil.formatrevnode(self.ui, mrev, mnode),
1646 % scmutil.formatrevnode(self.ui, mrev, mnode),
1647 label='ui.debug log.manifest')
1647 label='ui.debug log.manifest')
1648 self.ui.write(columns['user'] % ctx.user(), label='log.user')
1648 self.ui.write(columns['user'] % ctx.user(), label='log.user')
1649 self.ui.write(columns['date'] % util.datestr(ctx.date()),
1649 self.ui.write(columns['date'] % util.datestr(ctx.date()),
1650 label='log.date')
1650 label='log.date')
1651
1651
1652 if ctx.isunstable():
1652 if ctx.isunstable():
1653 instabilities = ctx.instabilities()
1653 instabilities = ctx.instabilities()
1654 self.ui.write(columns['instability'] % ', '.join(instabilities),
1654 self.ui.write(columns['instability'] % ', '.join(instabilities),
1655 label='log.instability')
1655 label='log.instability')
1656
1656
1657 elif ctx.obsolete():
1657 elif ctx.obsolete():
1658 self._showobsfate(ctx)
1658 self._showobsfate(ctx)
1659
1659
1660 self._exthook(ctx)
1660 self._exthook(ctx)
1661
1661
1662 if self.ui.debugflag:
1662 if self.ui.debugflag:
1663 files = ctx.p1().status(ctx)[:3]
1663 files = ctx.p1().status(ctx)[:3]
1664 for key, value in zip(['files', 'files+', 'files-'], files):
1664 for key, value in zip(['files', 'files+', 'files-'], files):
1665 if value:
1665 if value:
1666 self.ui.write(columns[key] % " ".join(value),
1666 self.ui.write(columns[key] % " ".join(value),
1667 label='ui.debug log.files')
1667 label='ui.debug log.files')
1668 elif ctx.files() and self.ui.verbose:
1668 elif ctx.files() and self.ui.verbose:
1669 self.ui.write(columns['files'] % " ".join(ctx.files()),
1669 self.ui.write(columns['files'] % " ".join(ctx.files()),
1670 label='ui.note log.files')
1670 label='ui.note log.files')
1671 if copies and self.ui.verbose:
1671 if copies and self.ui.verbose:
1672 copies = ['%s (%s)' % c for c in copies]
1672 copies = ['%s (%s)' % c for c in copies]
1673 self.ui.write(columns['copies'] % ' '.join(copies),
1673 self.ui.write(columns['copies'] % ' '.join(copies),
1674 label='ui.note log.copies')
1674 label='ui.note log.copies')
1675
1675
1676 extra = ctx.extra()
1676 extra = ctx.extra()
1677 if extra and self.ui.debugflag:
1677 if extra and self.ui.debugflag:
1678 for key, value in sorted(extra.items()):
1678 for key, value in sorted(extra.items()):
1679 self.ui.write(columns['extra'] % (key, util.escapestr(value)),
1679 self.ui.write(columns['extra'] % (key, util.escapestr(value)),
1680 label='ui.debug log.extra')
1680 label='ui.debug log.extra')
1681
1681
1682 description = ctx.description().strip()
1682 description = ctx.description().strip()
1683 if description:
1683 if description:
1684 if self.ui.verbose:
1684 if self.ui.verbose:
1685 self.ui.write(_("description:\n"),
1685 self.ui.write(_("description:\n"),
1686 label='ui.note log.description')
1686 label='ui.note log.description')
1687 self.ui.write(description,
1687 self.ui.write(description,
1688 label='ui.note log.description')
1688 label='ui.note log.description')
1689 self.ui.write("\n\n")
1689 self.ui.write("\n\n")
1690 else:
1690 else:
1691 self.ui.write(columns['summary'] % description.splitlines()[0],
1691 self.ui.write(columns['summary'] % description.splitlines()[0],
1692 label='log.summary')
1692 label='log.summary')
1693 self.ui.write("\n")
1693 self.ui.write("\n")
1694
1694
1695 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1695 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1696
1696
1697 def _showobsfate(self, ctx):
1697 def _showobsfate(self, ctx):
1698 obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
1698 obsfate = templatekw.showobsfate(repo=self.repo, ctx=ctx, ui=self.ui)
1699
1699
1700 if obsfate:
1700 if obsfate:
1701 for obsfateline in obsfate:
1701 for obsfateline in obsfate:
1702 self.ui.write(self._columns['obsolete'] % obsfateline,
1702 self.ui.write(self._columns['obsolete'] % obsfateline,
1703 label='log.obsfate')
1703 label='log.obsfate')
1704
1704
1705 def _exthook(self, ctx):
1705 def _exthook(self, ctx):
1706 '''empty method used by extensions as a hook point
1706 '''empty method used by extensions as a hook point
1707 '''
1707 '''
1708
1708
1709 def showpatch(self, ctx, matchfn, hunksfilterfn=None):
1709 def showpatch(self, ctx, matchfn, hunksfilterfn=None):
1710 if not matchfn:
1710 if not matchfn:
1711 matchfn = self.matchfn
1711 matchfn = self.matchfn
1712 if matchfn:
1712 if matchfn:
1713 stat = self.diffopts.get('stat')
1713 stat = self.diffopts.get('stat')
1714 diff = self.diffopts.get('patch')
1714 diff = self.diffopts.get('patch')
1715 diffopts = patch.diffallopts(self.ui, self.diffopts)
1715 diffopts = patch.diffallopts(self.ui, self.diffopts)
1716 node = ctx.node()
1716 node = ctx.node()
1717 prev = ctx.p1().node()
1717 prev = ctx.p1().node()
1718 if stat:
1718 if stat:
1719 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1719 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1720 match=matchfn, stat=True,
1720 match=matchfn, stat=True,
1721 hunksfilterfn=hunksfilterfn)
1721 hunksfilterfn=hunksfilterfn)
1722 if diff:
1722 if diff:
1723 if stat:
1723 if stat:
1724 self.ui.write("\n")
1724 self.ui.write("\n")
1725 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1725 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1726 match=matchfn, stat=False,
1726 match=matchfn, stat=False,
1727 hunksfilterfn=hunksfilterfn)
1727 hunksfilterfn=hunksfilterfn)
1728 self.ui.write("\n")
1728 self.ui.write("\n")
1729
1729
1730 class jsonchangeset(changeset_printer):
1730 class jsonchangeset(changeset_printer):
1731 '''format changeset information.'''
1731 '''format changeset information.'''
1732
1732
1733 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1733 def __init__(self, ui, repo, matchfn, diffopts, buffered):
1734 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1734 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1735 self.cache = {}
1735 self.cache = {}
1736 self._first = True
1736 self._first = True
1737
1737
1738 def close(self):
1738 def close(self):
1739 if not self._first:
1739 if not self._first:
1740 self.ui.write("\n]\n")
1740 self.ui.write("\n]\n")
1741 else:
1741 else:
1742 self.ui.write("[]\n")
1742 self.ui.write("[]\n")
1743
1743
1744 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1744 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1745 '''show a single changeset or file revision'''
1745 '''show a single changeset or file revision'''
1746 rev = ctx.rev()
1746 rev = ctx.rev()
1747 if rev is None:
1747 if rev is None:
1748 jrev = jnode = 'null'
1748 jrev = jnode = 'null'
1749 else:
1749 else:
1750 jrev = '%d' % rev
1750 jrev = '%d' % rev
1751 jnode = '"%s"' % hex(ctx.node())
1751 jnode = '"%s"' % hex(ctx.node())
1752 j = encoding.jsonescape
1752 j = encoding.jsonescape
1753
1753
1754 if self._first:
1754 if self._first:
1755 self.ui.write("[\n {")
1755 self.ui.write("[\n {")
1756 self._first = False
1756 self._first = False
1757 else:
1757 else:
1758 self.ui.write(",\n {")
1758 self.ui.write(",\n {")
1759
1759
1760 if self.ui.quiet:
1760 if self.ui.quiet:
1761 self.ui.write(('\n "rev": %s') % jrev)
1761 self.ui.write(('\n "rev": %s') % jrev)
1762 self.ui.write((',\n "node": %s') % jnode)
1762 self.ui.write((',\n "node": %s') % jnode)
1763 self.ui.write('\n }')
1763 self.ui.write('\n }')
1764 return
1764 return
1765
1765
1766 self.ui.write(('\n "rev": %s') % jrev)
1766 self.ui.write(('\n "rev": %s') % jrev)
1767 self.ui.write((',\n "node": %s') % jnode)
1767 self.ui.write((',\n "node": %s') % jnode)
1768 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1768 self.ui.write((',\n "branch": "%s"') % j(ctx.branch()))
1769 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1769 self.ui.write((',\n "phase": "%s"') % ctx.phasestr())
1770 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1770 self.ui.write((',\n "user": "%s"') % j(ctx.user()))
1771 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1771 self.ui.write((',\n "date": [%d, %d]') % ctx.date())
1772 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1772 self.ui.write((',\n "desc": "%s"') % j(ctx.description()))
1773
1773
1774 self.ui.write((',\n "bookmarks": [%s]') %
1774 self.ui.write((',\n "bookmarks": [%s]') %
1775 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1775 ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
1776 self.ui.write((',\n "tags": [%s]') %
1776 self.ui.write((',\n "tags": [%s]') %
1777 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1777 ", ".join('"%s"' % j(t) for t in ctx.tags()))
1778 self.ui.write((',\n "parents": [%s]') %
1778 self.ui.write((',\n "parents": [%s]') %
1779 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1779 ", ".join('"%s"' % c.hex() for c in ctx.parents()))
1780
1780
1781 if self.ui.debugflag:
1781 if self.ui.debugflag:
1782 if rev is None:
1782 if rev is None:
1783 jmanifestnode = 'null'
1783 jmanifestnode = 'null'
1784 else:
1784 else:
1785 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1785 jmanifestnode = '"%s"' % hex(ctx.manifestnode())
1786 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1786 self.ui.write((',\n "manifest": %s') % jmanifestnode)
1787
1787
1788 self.ui.write((',\n "extra": {%s}') %
1788 self.ui.write((',\n "extra": {%s}') %
1789 ", ".join('"%s": "%s"' % (j(k), j(v))
1789 ", ".join('"%s": "%s"' % (j(k), j(v))
1790 for k, v in ctx.extra().items()))
1790 for k, v in ctx.extra().items()))
1791
1791
1792 files = ctx.p1().status(ctx)
1792 files = ctx.p1().status(ctx)
1793 self.ui.write((',\n "modified": [%s]') %
1793 self.ui.write((',\n "modified": [%s]') %
1794 ", ".join('"%s"' % j(f) for f in files[0]))
1794 ", ".join('"%s"' % j(f) for f in files[0]))
1795 self.ui.write((',\n "added": [%s]') %
1795 self.ui.write((',\n "added": [%s]') %
1796 ", ".join('"%s"' % j(f) for f in files[1]))
1796 ", ".join('"%s"' % j(f) for f in files[1]))
1797 self.ui.write((',\n "removed": [%s]') %
1797 self.ui.write((',\n "removed": [%s]') %
1798 ", ".join('"%s"' % j(f) for f in files[2]))
1798 ", ".join('"%s"' % j(f) for f in files[2]))
1799
1799
1800 elif self.ui.verbose:
1800 elif self.ui.verbose:
1801 self.ui.write((',\n "files": [%s]') %
1801 self.ui.write((',\n "files": [%s]') %
1802 ", ".join('"%s"' % j(f) for f in ctx.files()))
1802 ", ".join('"%s"' % j(f) for f in ctx.files()))
1803
1803
1804 if copies:
1804 if copies:
1805 self.ui.write((',\n "copies": {%s}') %
1805 self.ui.write((',\n "copies": {%s}') %
1806 ", ".join('"%s": "%s"' % (j(k), j(v))
1806 ", ".join('"%s": "%s"' % (j(k), j(v))
1807 for k, v in copies))
1807 for k, v in copies))
1808
1808
1809 matchfn = self.matchfn
1809 matchfn = self.matchfn
1810 if matchfn:
1810 if matchfn:
1811 stat = self.diffopts.get('stat')
1811 stat = self.diffopts.get('stat')
1812 diff = self.diffopts.get('patch')
1812 diff = self.diffopts.get('patch')
1813 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1813 diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
1814 node, prev = ctx.node(), ctx.p1().node()
1814 node, prev = ctx.node(), ctx.p1().node()
1815 if stat:
1815 if stat:
1816 self.ui.pushbuffer()
1816 self.ui.pushbuffer()
1817 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1817 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1818 match=matchfn, stat=True)
1818 match=matchfn, stat=True)
1819 self.ui.write((',\n "diffstat": "%s"')
1819 self.ui.write((',\n "diffstat": "%s"')
1820 % j(self.ui.popbuffer()))
1820 % j(self.ui.popbuffer()))
1821 if diff:
1821 if diff:
1822 self.ui.pushbuffer()
1822 self.ui.pushbuffer()
1823 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1823 diffordiffstat(self.ui, self.repo, diffopts, prev, node,
1824 match=matchfn, stat=False)
1824 match=matchfn, stat=False)
1825 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1825 self.ui.write((',\n "diff": "%s"') % j(self.ui.popbuffer()))
1826
1826
1827 self.ui.write("\n }")
1827 self.ui.write("\n }")
1828
1828
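# Illustration only (hypothetical values): the object the method above emits
# for one changeset under "hg log -Tjson"; with --quiet only "rev" and
# "node" are written.
#
#  {
#   "rev": 0,
#   "node": "1e4e1b8f71e05681d422154f5421e385fec3454f",
#   "branch": "default",
#   "phase": "public",
#   "user": "test <test@example.com>",
#   "date": [1514764800, 0],
#   "desc": "initial commit",
#   "bookmarks": [],
#   "tags": ["tip"],
#   "parents": ["0000000000000000000000000000000000000000"]
#  }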
1829 class changeset_templater(changeset_printer):
1829 class changeset_templater(changeset_printer):
1830 '''format changeset information.
1830 '''format changeset information.
1831
1831
1832 Note: there are a variety of convenience functions to build a
1832 Note: there are a variety of convenience functions to build a
1833 changeset_templater for common cases. See functions such as:
1833 changeset_templater for common cases. See functions such as:
1834 makelogtemplater, show_changeset, buildcommittemplate, or other
1834 makelogtemplater, show_changeset, buildcommittemplate, or other
1835 functions that use changeset_templater.
1835 functions that use changeset_templater.
1836 '''
1836 '''
1837
1837
1838 # Arguments before "buffered" used to be positional. Consider not
1838 # Arguments before "buffered" used to be positional. Consider not
1839 # adding/removing arguments before "buffered" to not break callers.
1839 # adding/removing arguments before "buffered" to not break callers.
1840 def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
1840 def __init__(self, ui, repo, tmplspec, matchfn=None, diffopts=None,
1841 buffered=False):
1841 buffered=False):
1842 diffopts = diffopts or {}
1842 diffopts = diffopts or {}
1843
1843
1844 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1844 changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
1845 self.t = formatter.loadtemplater(ui, tmplspec,
1845 self.t = formatter.loadtemplater(ui, tmplspec,
1846 cache=templatekw.defaulttempl)
1846 cache=templatekw.defaulttempl)
1847 self._counter = itertools.count()
1847 self._counter = itertools.count()
1848 self.cache = {}
1848 self.cache = {}
1849
1849
1850 self._tref = tmplspec.ref
1850 self._tref = tmplspec.ref
1851 self._parts = {'header': '', 'footer': '',
1851 self._parts = {'header': '', 'footer': '',
1852 tmplspec.ref: tmplspec.ref,
1852 tmplspec.ref: tmplspec.ref,
1853 'docheader': '', 'docfooter': '',
1853 'docheader': '', 'docfooter': '',
1854 'separator': ''}
1854 'separator': ''}
1855 if tmplspec.mapfile:
1855 if tmplspec.mapfile:
1856 # find correct templates for current mode, for backward
1856 # find correct templates for current mode, for backward
1857 # compatibility with 'log -v/-q/--debug' using a mapfile
1857 # compatibility with 'log -v/-q/--debug' using a mapfile
1858 tmplmodes = [
1858 tmplmodes = [
1859 (True, ''),
1859 (True, ''),
1860 (self.ui.verbose, '_verbose'),
1860 (self.ui.verbose, '_verbose'),
1861 (self.ui.quiet, '_quiet'),
1861 (self.ui.quiet, '_quiet'),
1862 (self.ui.debugflag, '_debug'),
1862 (self.ui.debugflag, '_debug'),
1863 ]
1863 ]
1864 for mode, postfix in tmplmodes:
1864 for mode, postfix in tmplmodes:
1865 for t in self._parts:
1865 for t in self._parts:
1866 cur = t + postfix
1866 cur = t + postfix
1867 if mode and cur in self.t:
1867 if mode and cur in self.t:
1868 self._parts[t] = cur
1868 self._parts[t] = cur
1869 else:
1869 else:
1870 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
1870 partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
1871 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
1871 m = formatter.templatepartsmap(tmplspec, self.t, partnames)
1872 self._parts.update(m)
1872 self._parts.update(m)
1873
1873
1874 if self._parts['docheader']:
1874 if self._parts['docheader']:
1875 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1875 self.ui.write(templater.stringify(self.t(self._parts['docheader'])))
1876
1876
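# A hedged sketch (assumed map-file entries, not taken from this patch) of the
# mode handling above: when a style map file defines a postfixed part, that
# part wins for the matching mode, e.g. 'changeset_verbose' under -v.
#
# changeset = 'changeset: {rev}:{node|short}\n'
# changeset_verbose = 'changeset: {rev}:{node|short}\nfiles: {files}\n'
# changeset_debug = 'changeset: {rev}:{node}\nmanifest: {manifest}\n'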
1877 def close(self):
1877 def close(self):
1878 if self._parts['docfooter']:
1878 if self._parts['docfooter']:
1879 if not self.footer:
1879 if not self.footer:
1880 self.footer = ""
1880 self.footer = ""
1881 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1881 self.footer += templater.stringify(self.t(self._parts['docfooter']))
1882 return super(changeset_templater, self).close()
1882 return super(changeset_templater, self).close()
1883
1883
1884 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1884 def _show(self, ctx, copies, matchfn, hunksfilterfn, props):
1885 '''show a single changeset or file revision'''
1885 '''show a single changeset or file revision'''
1886 props = props.copy()
1886 props = props.copy()
1887 props.update(templatekw.keywords)
1887 props.update(templatekw.keywords)
1888 props['templ'] = self.t
1888 props['templ'] = self.t
1889 props['ctx'] = ctx
1889 props['ctx'] = ctx
1890 props['repo'] = self.repo
1890 props['repo'] = self.repo
1891 props['ui'] = self.repo.ui
1891 props['ui'] = self.repo.ui
1892 props['index'] = index = next(self._counter)
1892 props['index'] = index = next(self._counter)
1893 props['revcache'] = {'copies': copies}
1893 props['revcache'] = {'copies': copies}
1894 props['cache'] = self.cache
1894 props['cache'] = self.cache
1895 props = pycompat.strkwargs(props)
1895 props = pycompat.strkwargs(props)
1896
1896
1897 # write separator, which wouldn't work well with the header part below
1897 # write separator, which wouldn't work well with the header part below
1898 # since there's inherently a conflict between header (across items) and
1898 # since there's inherently a conflict between header (across items) and
1899 # separator (per item)
1899 # separator (per item)
1900 if self._parts['separator'] and index > 0:
1900 if self._parts['separator'] and index > 0:
1901 self.ui.write(templater.stringify(self.t(self._parts['separator'])))
1901 self.ui.write(templater.stringify(self.t(self._parts['separator'])))
1902
1902
1903 # write header
1903 # write header
1904 if self._parts['header']:
1904 if self._parts['header']:
1905 h = templater.stringify(self.t(self._parts['header'], **props))
1905 h = templater.stringify(self.t(self._parts['header'], **props))
1906 if self.buffered:
1906 if self.buffered:
1907 self.header[ctx.rev()] = h
1907 self.header[ctx.rev()] = h
1908 else:
1908 else:
1909 if self.lastheader != h:
1909 if self.lastheader != h:
1910 self.lastheader = h
1910 self.lastheader = h
1911 self.ui.write(h)
1911 self.ui.write(h)
1912
1912
1913 # write changeset metadata, then patch if requested
1913 # write changeset metadata, then patch if requested
1914 key = self._parts[self._tref]
1914 key = self._parts[self._tref]
1915 self.ui.write(templater.stringify(self.t(key, **props)))
1915 self.ui.write(templater.stringify(self.t(key, **props)))
1916 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1916 self.showpatch(ctx, matchfn, hunksfilterfn=hunksfilterfn)
1917
1917
1918 if self._parts['footer']:
1918 if self._parts['footer']:
1919 if not self.footer:
1919 if not self.footer:
1920 self.footer = templater.stringify(
1920 self.footer = templater.stringify(
1921 self.t(self._parts['footer'], **props))
1921 self.t(self._parts['footer'], **props))
1922
1922
1923 def logtemplatespec(tmpl, mapfile):
1923 def logtemplatespec(tmpl, mapfile):
1924 if mapfile:
1924 if mapfile:
1925 return formatter.templatespec('changeset', tmpl, mapfile)
1925 return formatter.templatespec('changeset', tmpl, mapfile)
1926 else:
1926 else:
1927 return formatter.templatespec('', tmpl, None)
1927 return formatter.templatespec('', tmpl, None)
1928
1928
1929 def _lookuplogtemplate(ui, tmpl, style):
1929 def _lookuplogtemplate(ui, tmpl, style):
1930 """Find the template matching the given template spec or style
1930 """Find the template matching the given template spec or style
1931
1931
1932 See formatter.lookuptemplate() for details.
1932 See formatter.lookuptemplate() for details.
1933 """
1933 """
1934
1934
1935 # ui settings
1935 # ui settings
1936 if not tmpl and not style: # templates are stronger than style
1936 if not tmpl and not style: # templates are stronger than style
1937 tmpl = ui.config('ui', 'logtemplate')
1937 tmpl = ui.config('ui', 'logtemplate')
1938 if tmpl:
1938 if tmpl:
1939 return logtemplatespec(templater.unquotestring(tmpl), None)
1939 return logtemplatespec(templater.unquotestring(tmpl), None)
1940 else:
1940 else:
1941 style = util.expandpath(ui.config('ui', 'style'))
1941 style = util.expandpath(ui.config('ui', 'style'))
1942
1942
1943 if not tmpl and style:
1943 if not tmpl and style:
1944 mapfile = style
1944 mapfile = style
1945 if not os.path.split(mapfile)[0]:
1945 if not os.path.split(mapfile)[0]:
1946 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1946 mapname = (templater.templatepath('map-cmdline.' + mapfile)
1947 or templater.templatepath(mapfile))
1947 or templater.templatepath(mapfile))
1948 if mapname:
1948 if mapname:
1949 mapfile = mapname
1949 mapfile = mapname
1950 return logtemplatespec(None, mapfile)
1950 return logtemplatespec(None, mapfile)
1951
1951
1952 if not tmpl:
1952 if not tmpl:
1953 return logtemplatespec(None, None)
1953 return logtemplatespec(None, None)
1954
1954
1955 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1955 return formatter.lookuptemplate(ui, 'changeset', tmpl)
1956
1956
1957 def makelogtemplater(ui, repo, tmpl, buffered=False):
1957 def makelogtemplater(ui, repo, tmpl, buffered=False):
1958 """Create a changeset_templater from a literal template 'tmpl'
1958 """Create a changeset_templater from a literal template 'tmpl'
1959 byte-string."""
1959 byte-string."""
1960 spec = logtemplatespec(tmpl, None)
1960 spec = logtemplatespec(tmpl, None)
1961 return changeset_templater(ui, repo, spec, buffered=buffered)
1961 return changeset_templater(ui, repo, spec, buffered=buffered)
1962
1962
1963 def show_changeset(ui, repo, opts, buffered=False):
1963 def show_changeset(ui, repo, opts, buffered=False):
1964 """show one changeset using template or regular display.
1964 """show one changeset using template or regular display.
1965
1965
1966 Display format will be the first non-empty hit of:
1966 Display format will be the first non-empty hit of:
1967 1. option 'template'
1967 1. option 'template'
1968 2. option 'style'
1968 2. option 'style'
1969 3. [ui] setting 'logtemplate'
1969 3. [ui] setting 'logtemplate'
1970 4. [ui] setting 'style'
1970 4. [ui] setting 'style'
1971 If all of these values are either unset or the empty string,
1971 If all of these values are either unset or the empty string,
1972 regular display via changeset_printer() is done.
1972 regular display via changeset_printer() is done.
1973 """
1973 """
1974 # options
1974 # options
1975 match = None
1975 match = None
1976 if opts.get('patch') or opts.get('stat'):
1976 if opts.get('patch') or opts.get('stat'):
1977 match = scmutil.matchall(repo)
1977 match = scmutil.matchall(repo)
1978
1978
1979 if opts.get('template') == 'json':
1979 if opts.get('template') == 'json':
1980 return jsonchangeset(ui, repo, match, opts, buffered)
1980 return jsonchangeset(ui, repo, match, opts, buffered)
1981
1981
1982 spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
1982 spec = _lookuplogtemplate(ui, opts.get('template'), opts.get('style'))
1983
1983
1984 if not spec.ref and not spec.tmpl and not spec.mapfile:
1984 if not spec.ref and not spec.tmpl and not spec.mapfile:
1985 return changeset_printer(ui, repo, match, opts, buffered)
1985 return changeset_printer(ui, repo, match, opts, buffered)
1986
1986
1987 return changeset_templater(ui, repo, spec, match, opts, buffered)
1987 return changeset_templater(ui, repo, spec, match, opts, buffered)
1988
1988
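# A minimal usage sketch of show_changeset() (illustration only; assumes the
# 'show' and 'close' methods inherited from changeset_printer defined earlier
# in this module; the helper name is hypothetical).
def _example_show_recent(ui, repo):
    """Illustration only: print the five most recent changesets."""
    displayer = show_changeset(ui, repo, {'template': '{rev}: {desc|firstline}\n'})
    for rev in repo.revs('last(all(), 5)'):
        displayer.show(repo[rev])
    displayer.close()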
1989 def showmarker(fm, marker, index=None):
1989 def showmarker(fm, marker, index=None):
1990 """utility function to display obsolescence marker in a readable way
1990 """utility function to display obsolescence marker in a readable way
1991
1991
1992 To be used by debug function."""
1992 To be used by debug function."""
1993 if index is not None:
1993 if index is not None:
1994 fm.write('index', '%i ', index)
1994 fm.write('index', '%i ', index)
1995 fm.write('prednode', '%s ', hex(marker.prednode()))
1995 fm.write('prednode', '%s ', hex(marker.prednode()))
1996 succs = marker.succnodes()
1996 succs = marker.succnodes()
1997 fm.condwrite(succs, 'succnodes', '%s ',
1997 fm.condwrite(succs, 'succnodes', '%s ',
1998 fm.formatlist(map(hex, succs), name='node'))
1998 fm.formatlist(map(hex, succs), name='node'))
1999 fm.write('flag', '%X ', marker.flags())
1999 fm.write('flag', '%X ', marker.flags())
2000 parents = marker.parentnodes()
2000 parents = marker.parentnodes()
2001 if parents is not None:
2001 if parents is not None:
2002 fm.write('parentnodes', '{%s} ',
2002 fm.write('parentnodes', '{%s} ',
2003 fm.formatlist(map(hex, parents), name='node', sep=', '))
2003 fm.formatlist(map(hex, parents), name='node', sep=', '))
2004 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
2004 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
2005 meta = marker.metadata().copy()
2005 meta = marker.metadata().copy()
2006 meta.pop('date', None)
2006 meta.pop('date', None)
2007 fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
2007 fm.write('metadata', '{%s}', fm.formatdict(meta, fmt='%r: %r', sep=', '))
2008 fm.plain('\n')
2008 fm.plain('\n')
2009
2009
2010 def finddate(ui, repo, date):
2010 def finddate(ui, repo, date):
2011 """Find the tipmost changeset that matches the given date spec"""
2011 """Find the tipmost changeset that matches the given date spec"""
2012
2012
2013 df = util.matchdate(date)
2013 df = util.matchdate(date)
2014 m = scmutil.matchall(repo)
2014 m = scmutil.matchall(repo)
2015 results = {}
2015 results = {}
2016
2016
2017 def prep(ctx, fns):
2017 def prep(ctx, fns):
2018 d = ctx.date()
2018 d = ctx.date()
2019 if df(d[0]):
2019 if df(d[0]):
2020 results[ctx.rev()] = d
2020 results[ctx.rev()] = d
2021
2021
2022 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
2022 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
2023 rev = ctx.rev()
2023 rev = ctx.rev()
2024 if rev in results:
2024 if rev in results:
2025 ui.status(_("found revision %s from %s\n") %
2025 ui.status(_("found revision %s from %s\n") %
2026 (rev, util.datestr(results[rev])))
2026 (rev, util.datestr(results[rev])))
2027 return '%d' % rev
2027 return '%d' % rev
2028
2028
2029 raise error.Abort(_("revision matching date not found"))
2029 raise error.Abort(_("revision matching date not found"))
2030
2030
2031 def increasingwindows(windowsize=8, sizelimit=512):
2031 def increasingwindows(windowsize=8, sizelimit=512):
2032 while True:
2032 while True:
2033 yield windowsize
2033 yield windowsize
2034 if windowsize < sizelimit:
2034 if windowsize < sizelimit:
2035 windowsize *= 2
2035 windowsize *= 2
2036
2036
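# Illustration only: the generator above doubles the window size up to the
# limit and then keeps yielding the limit.
#
# >>> import itertools
# >>> list(itertools.islice(increasingwindows(), 9))
# [8, 16, 32, 64, 128, 256, 512, 512, 512]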
2037 class FileWalkError(Exception):
2037 class FileWalkError(Exception):
2038 pass
2038 pass
2039
2039
2040 def walkfilerevs(repo, match, follow, revs, fncache):
2040 def walkfilerevs(repo, match, follow, revs, fncache):
2041 '''Walks the file history for the matched files.
2041 '''Walks the file history for the matched files.
2042
2042
2043 Returns the changeset revs that are involved in the file history.
2043 Returns the changeset revs that are involved in the file history.
2044
2044
2045 Throws FileWalkError if the file history can't be walked using
2045 Throws FileWalkError if the file history can't be walked using
2046 filelogs alone.
2046 filelogs alone.
2047 '''
2047 '''
2048 wanted = set()
2048 wanted = set()
2049 copies = []
2049 copies = []
2050 minrev, maxrev = min(revs), max(revs)
2050 minrev, maxrev = min(revs), max(revs)
2051 def filerevgen(filelog, last):
2051 def filerevgen(filelog, last):
2052 """
2052 """
2053 Only files, no patterns. Check the history of each file.
2053 Only files, no patterns. Check the history of each file.
2054
2054
2055 Examines filelog entries within minrev, maxrev linkrev range
2055 Examines filelog entries within minrev, maxrev linkrev range
2056 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
2056 Returns an iterator yielding (linkrev, parentlinkrevs, copied)
2057 tuples in backwards order
2057 tuples in backwards order
2058 """
2058 """
2059 cl_count = len(repo)
2059 cl_count = len(repo)
2060 revs = []
2060 revs = []
2061 for j in xrange(0, last + 1):
2061 for j in xrange(0, last + 1):
2062 linkrev = filelog.linkrev(j)
2062 linkrev = filelog.linkrev(j)
2063 if linkrev < minrev:
2063 if linkrev < minrev:
2064 continue
2064 continue
2065 # only yield revs for which we have the changelog entry; it can
2065 # only yield revs for which we have the changelog entry; it can
2066 # be missing while doing "hg log" during a pull or commit
2066 # be missing while doing "hg log" during a pull or commit
2067 if linkrev >= cl_count:
2067 if linkrev >= cl_count:
2068 break
2068 break
2069
2069
2070 parentlinkrevs = []
2070 parentlinkrevs = []
2071 for p in filelog.parentrevs(j):
2071 for p in filelog.parentrevs(j):
2072 if p != nullrev:
2072 if p != nullrev:
2073 parentlinkrevs.append(filelog.linkrev(p))
2073 parentlinkrevs.append(filelog.linkrev(p))
2074 n = filelog.node(j)
2074 n = filelog.node(j)
2075 revs.append((linkrev, parentlinkrevs,
2075 revs.append((linkrev, parentlinkrevs,
2076 follow and filelog.renamed(n)))
2076 follow and filelog.renamed(n)))
2077
2077
2078 return reversed(revs)
2078 return reversed(revs)
2079 def iterfiles():
2079 def iterfiles():
2080 pctx = repo['.']
2080 pctx = repo['.']
2081 for filename in match.files():
2081 for filename in match.files():
2082 if follow:
2082 if follow:
2083 if filename not in pctx:
2083 if filename not in pctx:
2084 raise error.Abort(_('cannot follow file not in parent '
2084 raise error.Abort(_('cannot follow file not in parent '
2085 'revision: "%s"') % filename)
2085 'revision: "%s"') % filename)
2086 yield filename, pctx[filename].filenode()
2086 yield filename, pctx[filename].filenode()
2087 else:
2087 else:
2088 yield filename, None
2088 yield filename, None
2089 for filename_node in copies:
2089 for filename_node in copies:
2090 yield filename_node
2090 yield filename_node
2091
2091
2092 for file_, node in iterfiles():
2092 for file_, node in iterfiles():
2093 filelog = repo.file(file_)
2093 filelog = repo.file(file_)
2094 if not len(filelog):
2094 if not len(filelog):
2095 if node is None:
2095 if node is None:
2096 # A zero count may be a directory or deleted file, so
2096 # A zero count may be a directory or deleted file, so
2097 # try to find matching entries on the slow path.
2097 # try to find matching entries on the slow path.
2098 if follow:
2098 if follow:
2099 raise error.Abort(
2099 raise error.Abort(
2100 _('cannot follow nonexistent file: "%s"') % file_)
2100 _('cannot follow nonexistent file: "%s"') % file_)
2101 raise FileWalkError("Cannot walk via filelog")
2101 raise FileWalkError("Cannot walk via filelog")
2102 else:
2102 else:
2103 continue
2103 continue
2104
2104
2105 if node is None:
2105 if node is None:
2106 last = len(filelog) - 1
2106 last = len(filelog) - 1
2107 else:
2107 else:
2108 last = filelog.rev(node)
2108 last = filelog.rev(node)
2109
2109
2110 # keep track of all ancestors of the file
2110 # keep track of all ancestors of the file
2111 ancestors = {filelog.linkrev(last)}
2111 ancestors = {filelog.linkrev(last)}
2112
2112
2113 # iterate from latest to oldest revision
2113 # iterate from latest to oldest revision
2114 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
2114 for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
2115 if not follow:
2115 if not follow:
2116 if rev > maxrev:
2116 if rev > maxrev:
2117 continue
2117 continue
2118 else:
2118 else:
2119 # Note that last might not be the first interesting
2119 # Note that last might not be the first interesting
2120 # rev to us:
2120 # rev to us:
2121 # if the file has been changed after maxrev, we'll
2121 # if the file has been changed after maxrev, we'll
2122 # have linkrev(last) > maxrev, and we still need
2122 # have linkrev(last) > maxrev, and we still need
2123 # to explore the file graph
2123 # to explore the file graph
2124 if rev not in ancestors:
2124 if rev not in ancestors:
2125 continue
2125 continue
2126 # XXX insert 1327 fix here
2126 # XXX insert 1327 fix here
2127 if flparentlinkrevs:
2127 if flparentlinkrevs:
2128 ancestors.update(flparentlinkrevs)
2128 ancestors.update(flparentlinkrevs)
2129
2129
2130 fncache.setdefault(rev, []).append(file_)
2130 fncache.setdefault(rev, []).append(file_)
2131 wanted.add(rev)
2131 wanted.add(rev)
2132 if copied:
2132 if copied:
2133 copies.append(copied)
2133 copies.append(copied)
2134
2134
2135 return wanted
2135 return wanted
2136
2136
2137 class _followfilter(object):
2137 class _followfilter(object):
2138 def __init__(self, repo, onlyfirst=False):
2138 def __init__(self, repo, onlyfirst=False):
2139 self.repo = repo
2139 self.repo = repo
2140 self.startrev = nullrev
2140 self.startrev = nullrev
2141 self.roots = set()
2141 self.roots = set()
2142 self.onlyfirst = onlyfirst
2142 self.onlyfirst = onlyfirst
2143
2143
2144 def match(self, rev):
2144 def match(self, rev):
2145 def realparents(rev):
2145 def realparents(rev):
2146 if self.onlyfirst:
2146 if self.onlyfirst:
2147 return self.repo.changelog.parentrevs(rev)[0:1]
2147 return self.repo.changelog.parentrevs(rev)[0:1]
2148 else:
2148 else:
2149 return filter(lambda x: x != nullrev,
2149 return filter(lambda x: x != nullrev,
2150 self.repo.changelog.parentrevs(rev))
2150 self.repo.changelog.parentrevs(rev))
2151
2151
2152 if self.startrev == nullrev:
2152 if self.startrev == nullrev:
2153 self.startrev = rev
2153 self.startrev = rev
2154 return True
2154 return True
2155
2155
2156 if rev > self.startrev:
2156 if rev > self.startrev:
2157 # forward: all descendants
2157 # forward: all descendants
2158 if not self.roots:
2158 if not self.roots:
2159 self.roots.add(self.startrev)
2159 self.roots.add(self.startrev)
2160 for parent in realparents(rev):
2160 for parent in realparents(rev):
2161 if parent in self.roots:
2161 if parent in self.roots:
2162 self.roots.add(rev)
2162 self.roots.add(rev)
2163 return True
2163 return True
2164 else:
2164 else:
2165 # backwards: all parents
2165 # backwards: all parents
2166 if not self.roots:
2166 if not self.roots:
2167 self.roots.update(realparents(self.startrev))
2167 self.roots.update(realparents(self.startrev))
2168 if rev in self.roots:
2168 if rev in self.roots:
2169 self.roots.remove(rev)
2169 self.roots.remove(rev)
2170 self.roots.update(realparents(rev))
2170 self.roots.update(realparents(rev))
2171 return True
2171 return True
2172
2172
2173 return False
2173 return False
2174
2174
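# A hedged sketch of the filter above (illustration only): fed revisions in
# descending order, match() keeps the first revision seen and its ancestors;
# in ascending order it keeps that revision and its descendants.
#
# ff = _followfilter(repo, onlyfirst=False)
# ancestors_of_tip = [r for r in repo.revs('reverse(all())') if ff.match(r)]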
2175 def walkchangerevs(repo, match, opts, prepare):
2175 def walkchangerevs(repo, match, opts, prepare):
2176 '''Iterate over files and the revs in which they changed.
2176 '''Iterate over files and the revs in which they changed.
2177
2177
2178 Callers most commonly need to iterate backwards over the history
2178 Callers most commonly need to iterate backwards over the history
2179 in which they are interested. Doing so has awful (quadratic-looking)
2179 in which they are interested. Doing so has awful (quadratic-looking)
2180 performance, so we use iterators in a "windowed" way.
2180 performance, so we use iterators in a "windowed" way.
2181
2181
2182 We walk a window of revisions in the desired order. Within the
2182 We walk a window of revisions in the desired order. Within the
2183 window, we first walk forwards to gather data, then in the desired
2183 window, we first walk forwards to gather data, then in the desired
2184 order (usually backwards) to display it.
2184 order (usually backwards) to display it.
2185
2185
2186 This function returns an iterator yielding contexts. Before
2186 This function returns an iterator yielding contexts. Before
2187 yielding each context, the iterator will first call the prepare
2187 yielding each context, the iterator will first call the prepare
2188 function on each context in the window in forward order.'''
2188 function on each context in the window in forward order.'''
2189
2189
2190 follow = opts.get('follow') or opts.get('follow_first')
2190 follow = opts.get('follow') or opts.get('follow_first')
2191 revs = _logrevs(repo, opts)
2191 revs = _logrevs(repo, opts)
2192 if not revs:
2192 if not revs:
2193 return []
2193 return []
2194 wanted = set()
2194 wanted = set()
2195 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2195 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2196 opts.get('removed'))
2196 opts.get('removed'))
2197 fncache = {}
2197 fncache = {}
2198 change = repo.changectx
2198 change = repo.changectx
2199
2199
2200 # First step is to fill wanted, the set of revisions that we want to yield.
2200 # First step is to fill wanted, the set of revisions that we want to yield.
2201 # When it does not induce extra cost, we also fill fncache for revisions in
2201 # When it does not induce extra cost, we also fill fncache for revisions in
2202 # wanted: a cache of filenames that were changed (ctx.files()) and that
2202 # wanted: a cache of filenames that were changed (ctx.files()) and that
2203 # match the file filtering conditions.
2203 # match the file filtering conditions.
2204
2204
2205 if match.always():
2205 if match.always():
2206 # No files, no patterns. Display all revs.
2206 # No files, no patterns. Display all revs.
2207 wanted = revs
2207 wanted = revs
2208 elif not slowpath:
2208 elif not slowpath:
2209 # We only have to read through the filelog to find wanted revisions
2209 # We only have to read through the filelog to find wanted revisions
2210
2210
2211 try:
2211 try:
2212 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2212 wanted = walkfilerevs(repo, match, follow, revs, fncache)
2213 except FileWalkError:
2213 except FileWalkError:
2214 slowpath = True
2214 slowpath = True
2215
2215
2216 # We decided to fall back to the slowpath because at least one
2216 # We decided to fall back to the slowpath because at least one
2217 # of the paths was not a file. Check to see if at least one of them
2217 # of the paths was not a file. Check to see if at least one of them
2218 # existed in history, otherwise simply return
2218 # existed in history, otherwise simply return
2219 for path in match.files():
2219 for path in match.files():
2220 if path == '.' or path in repo.store:
2220 if path == '.' or path in repo.store:
2221 break
2221 break
2222 else:
2222 else:
2223 return []
2223 return []
2224
2224
2225 if slowpath:
2225 if slowpath:
2226 # We have to read the changelog to match filenames against
2226 # We have to read the changelog to match filenames against
2227 # changed files
2227 # changed files
2228
2228
2229 if follow:
2229 if follow:
2230 raise error.Abort(_('can only follow copies/renames for explicit '
2230 raise error.Abort(_('can only follow copies/renames for explicit '
2231 'filenames'))
2231 'filenames'))
2232
2232
2233 # The slow path checks files modified in every changeset.
2233 # The slow path checks files modified in every changeset.
2234 # This is really slow on large repos, so compute the set lazily.
2234 # This is really slow on large repos, so compute the set lazily.
2235 class lazywantedset(object):
2235 class lazywantedset(object):
2236 def __init__(self):
2236 def __init__(self):
2237 self.set = set()
2237 self.set = set()
2238 self.revs = set(revs)
2238 self.revs = set(revs)
2239
2239
2240 # No need to worry about locality here because it will be accessed
2240 # No need to worry about locality here because it will be accessed
2241 # in the same order as the increasing window below.
2241 # in the same order as the increasing window below.
2242 def __contains__(self, value):
2242 def __contains__(self, value):
2243 if value in self.set:
2243 if value in self.set:
2244 return True
2244 return True
2245 elif not value in self.revs:
2245 elif not value in self.revs:
2246 return False
2246 return False
2247 else:
2247 else:
2248 self.revs.discard(value)
2248 self.revs.discard(value)
2249 ctx = change(value)
2249 ctx = change(value)
2250 matches = filter(match, ctx.files())
2250 matches = filter(match, ctx.files())
2251 if matches:
2251 if matches:
2252 fncache[value] = matches
2252 fncache[value] = matches
2253 self.set.add(value)
2253 self.set.add(value)
2254 return True
2254 return True
2255 return False
2255 return False
2256
2256
2257 def discard(self, value):
2257 def discard(self, value):
2258 self.revs.discard(value)
2258 self.revs.discard(value)
2259 self.set.discard(value)
2259 self.set.discard(value)
2260
2260
2261 wanted = lazywantedset()
2261 wanted = lazywantedset()
2262
2262
2263 # it might be worthwhile to do this in the iterator if the rev range
2263 # it might be worthwhile to do this in the iterator if the rev range
2264 # is descending and the prune args are all within that range
2264 # is descending and the prune args are all within that range
2265 for rev in opts.get('prune', ()):
2265 for rev in opts.get('prune', ()):
2266 rev = repo[rev].rev()
2266 rev = repo[rev].rev()
2267 ff = _followfilter(repo)
2267 ff = _followfilter(repo)
2268 stop = min(revs[0], revs[-1])
2268 stop = min(revs[0], revs[-1])
2269 for x in xrange(rev, stop - 1, -1):
2269 for x in xrange(rev, stop - 1, -1):
2270 if ff.match(x):
2270 if ff.match(x):
2271 wanted = wanted - [x]
2271 wanted = wanted - [x]
2272
2272
2273 # Now that wanted is correctly initialized, we can iterate over the
2273 # Now that wanted is correctly initialized, we can iterate over the
2274 # revision range, yielding only revisions in wanted.
2274 # revision range, yielding only revisions in wanted.
2275 def iterate():
2275 def iterate():
2276 if follow and match.always():
2276 if follow and match.always():
2277 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
2277 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
2278 def want(rev):
2278 def want(rev):
2279 return ff.match(rev) and rev in wanted
2279 return ff.match(rev) and rev in wanted
2280 else:
2280 else:
2281 def want(rev):
2281 def want(rev):
2282 return rev in wanted
2282 return rev in wanted
2283
2283
2284 it = iter(revs)
2284 it = iter(revs)
2285 stopiteration = False
2285 stopiteration = False
2286 for windowsize in increasingwindows():
2286 for windowsize in increasingwindows():
2287 nrevs = []
2287 nrevs = []
2288 for i in xrange(windowsize):
2288 for i in xrange(windowsize):
2289 rev = next(it, None)
2289 rev = next(it, None)
2290 if rev is None:
2290 if rev is None:
2291 stopiteration = True
2291 stopiteration = True
2292 break
2292 break
2293 elif want(rev):
2293 elif want(rev):
2294 nrevs.append(rev)
2294 nrevs.append(rev)
2295 for rev in sorted(nrevs):
2295 for rev in sorted(nrevs):
2296 fns = fncache.get(rev)
2296 fns = fncache.get(rev)
2297 ctx = change(rev)
2297 ctx = change(rev)
2298 if not fns:
2298 if not fns:
2299 def fns_generator():
2299 def fns_generator():
2300 for f in ctx.files():
2300 for f in ctx.files():
2301 if match(f):
2301 if match(f):
2302 yield f
2302 yield f
2303 fns = fns_generator()
2303 fns = fns_generator()
2304 prepare(ctx, fns)
2304 prepare(ctx, fns)
2305 for rev in nrevs:
2305 for rev in nrevs:
2306 yield change(rev)
2306 yield change(rev)
2307
2307
2308 if stopiteration:
2308 if stopiteration:
2309 break
2309 break
2310
2310
2311 return iterate()
2311 return iterate()
2312
2312
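# A hedged usage sketch of walkchangerevs(), mirroring finddate() above; the
# 'prepare' callback runs on every context in a window before that window's
# contexts are yielded (the helper name is hypothetical).
def _example_walkchangerevs(ui, repo):
    """Illustration only: list the files touched by each changeset."""
    touched = {}
    def prep(ctx, fns):
        touched[ctx.rev()] = sorted(fns)
    m = scmutil.matchall(repo)
    for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
        ui.write("%d: %s\n" % (ctx.rev(), ', '.join(touched[ctx.rev()])))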
2313 def _makefollowlogfilematcher(repo, files, followfirst):
2313 def _makefollowlogfilematcher(repo, files, followfirst):
2314 # When displaying a revision with --patch --follow FILE, we have
2314 # When displaying a revision with --patch --follow FILE, we have
2315 # to know which file of the revision must be diffed. With
2315 # to know which file of the revision must be diffed. With
2316 # --follow, we want the names of the ancestors of FILE in the
2316 # --follow, we want the names of the ancestors of FILE in the
2317 # revision, stored in "fcache". "fcache" is populated by
2317 # revision, stored in "fcache". "fcache" is populated by
2318 # reproducing the graph traversal already done by --follow revset
2318 # reproducing the graph traversal already done by --follow revset
2319 # and relating revs to file names (which is not "correct" but
2319 # and relating revs to file names (which is not "correct" but
2320 # good enough).
2320 # good enough).
2321 fcache = {}
2321 fcache = {}
2322 fcacheready = [False]
2322 fcacheready = [False]
2323 pctx = repo['.']
2323 pctx = repo['.']
2324
2324
2325 def populate():
2325 def populate():
2326 for fn in files:
2326 for fn in files:
2327 fctx = pctx[fn]
2327 fctx = pctx[fn]
2328 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
2328 fcache.setdefault(fctx.introrev(), set()).add(fctx.path())
2329 for c in fctx.ancestors(followfirst=followfirst):
2329 for c in fctx.ancestors(followfirst=followfirst):
2330 fcache.setdefault(c.rev(), set()).add(c.path())
2330 fcache.setdefault(c.rev(), set()).add(c.path())
2331
2331
2332 def filematcher(rev):
2332 def filematcher(rev):
2333 if not fcacheready[0]:
2333 if not fcacheready[0]:
2334 # Lazy initialization
2334 # Lazy initialization
2335 fcacheready[0] = True
2335 fcacheready[0] = True
2336 populate()
2336 populate()
2337 return scmutil.matchfiles(repo, fcache.get(rev, []))
2337 return scmutil.matchfiles(repo, fcache.get(rev, []))
2338
2338
2339 return filematcher
2339 return filematcher
2340
2340
2341 def _makenofollowlogfilematcher(repo, pats, opts):
2341 def _makenofollowlogfilematcher(repo, pats, opts):
2342 '''hook for extensions to override the filematcher for non-follow cases'''
2342 '''hook for extensions to override the filematcher for non-follow cases'''
2343 return None
2343 return None
2344
2344
2345 def _makelogrevset(repo, pats, opts, revs):
2345 def _makelogrevset(repo, pats, opts, revs):
2346 """Return (expr, filematcher) where expr is a revset string built
2346 """Return (expr, filematcher) where expr is a revset string built
2347 from log options and file patterns or None. If --stat or --patch
2347 from log options and file patterns or None. If --stat or --patch
2348 are not passed, filematcher is None. Otherwise it is a callable
2348 are not passed, filematcher is None. Otherwise it is a callable
2349 taking a revision number and returning a match object filtering
2349 taking a revision number and returning a match object filtering
2350 the files to be detailed when displaying the revision.
2350 the files to be detailed when displaying the revision.
2351 """
2351 """
2352 opt2revset = {
2352 opt2revset = {
2353 'no_merges': ('not merge()', None),
2353 'no_merges': ('not merge()', None),
2354 'only_merges': ('merge()', None),
2354 'only_merges': ('merge()', None),
2355 '_ancestors': ('ancestors(%(val)s)', None),
2355 '_ancestors': ('ancestors(%(val)s)', None),
2356 '_fancestors': ('_firstancestors(%(val)s)', None),
2356 '_fancestors': ('_firstancestors(%(val)s)', None),
2357 '_descendants': ('descendants(%(val)s)', None),
2357 '_descendants': ('descendants(%(val)s)', None),
2358 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2358 '_fdescendants': ('_firstdescendants(%(val)s)', None),
2359 '_matchfiles': ('_matchfiles(%(val)s)', None),
2359 '_matchfiles': ('_matchfiles(%(val)s)', None),
2360 'date': ('date(%(val)r)', None),
2360 'date': ('date(%(val)r)', None),
2361 'branch': ('branch(%(val)r)', ' or '),
2361 'branch': ('branch(%(val)r)', ' or '),
2362 '_patslog': ('filelog(%(val)r)', ' or '),
2362 '_patslog': ('filelog(%(val)r)', ' or '),
2363 '_patsfollow': ('follow(%(val)r)', ' or '),
2363 '_patsfollow': ('follow(%(val)r)', ' or '),
2364 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2364 '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
2365 'keyword': ('keyword(%(val)r)', ' or '),
2365 'keyword': ('keyword(%(val)r)', ' or '),
2366 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2366 'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
2367 'user': ('user(%(val)r)', ' or '),
2367 'user': ('user(%(val)r)', ' or '),
2368 }
2368 }
2369
2369
2370 opts = dict(opts)
2370 opts = dict(opts)
2371 # follow or not follow?
2371 # follow or not follow?
2372 follow = opts.get('follow') or opts.get('follow_first')
2372 follow = opts.get('follow') or opts.get('follow_first')
2373 if opts.get('follow_first'):
2373 if opts.get('follow_first'):
2374 followfirst = 1
2374 followfirst = 1
2375 else:
2375 else:
2376 followfirst = 0
2376 followfirst = 0
2377 # --follow with FILE behavior depends on revs...
2377 # --follow with FILE behavior depends on revs...
2378 it = iter(revs)
2378 it = iter(revs)
2379 startrev = next(it)
2379 startrev = next(it)
2380 followdescendants = startrev < next(it, startrev)
2380 followdescendants = startrev < next(it, startrev)
2381
2381
2382 # branch and only_branch are really aliases and must be handled at
2382 # branch and only_branch are really aliases and must be handled at
2383 # the same time
2383 # the same time
2384 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2384 opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
2385 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2385 opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
2386 # pats/include/exclude are passed to match.match() directly in
2386 # pats/include/exclude are passed to match.match() directly in
2387 # _matchfiles() revset but walkchangerevs() builds its matcher with
2387 # _matchfiles() revset but walkchangerevs() builds its matcher with
2388 # scmutil.match(). The difference is input pats are globbed on
2388 # scmutil.match(). The difference is input pats are globbed on
2389 # platforms without shell expansion (windows).
2389 # platforms without shell expansion (windows).
2390 wctx = repo[None]
2390 wctx = repo[None]
2391 match, pats = scmutil.matchandpats(wctx, pats, opts)
2391 match, pats = scmutil.matchandpats(wctx, pats, opts)
2392 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2392 slowpath = match.anypats() or ((match.isexact() or match.prefix()) and
2393 opts.get('removed'))
2393 opts.get('removed'))
2394 if not slowpath:
2394 if not slowpath:
2395 for f in match.files():
2395 for f in match.files():
2396 if follow and f not in wctx:
2396 if follow and f not in wctx:
2397 # If the file exists, it may be a directory, so let it
2397 # If the file exists, it may be a directory, so let it
2398 # take the slow path.
2398 # take the slow path.
2399 if os.path.exists(repo.wjoin(f)):
2399 if os.path.exists(repo.wjoin(f)):
2400 slowpath = True
2400 slowpath = True
2401 continue
2401 continue
2402 else:
2402 else:
2403 raise error.Abort(_('cannot follow file not in parent '
2403 raise error.Abort(_('cannot follow file not in parent '
2404 'revision: "%s"') % f)
2404 'revision: "%s"') % f)
2405 filelog = repo.file(f)
2405 filelog = repo.file(f)
2406 if not filelog:
2406 if not filelog:
2407 # A zero count may be a directory or deleted file, so
2407 # A zero count may be a directory or deleted file, so
2408 # try to find matching entries on the slow path.
2408 # try to find matching entries on the slow path.
2409 if follow:
2409 if follow:
2410 raise error.Abort(
2410 raise error.Abort(
2411 _('cannot follow nonexistent file: "%s"') % f)
2411 _('cannot follow nonexistent file: "%s"') % f)
2412 slowpath = True
2412 slowpath = True
2413
2413
2414 # We decided to fall back to the slowpath because at least one
2414 # We decided to fall back to the slowpath because at least one
2415 # of the paths was not a file. Check to see if at least one of them
2415 # of the paths was not a file. Check to see if at least one of them
2416 # existed in history - in that case, we'll continue down the
2416 # existed in history - in that case, we'll continue down the
2417 # slowpath; otherwise, we can turn off the slowpath
2417 # slowpath; otherwise, we can turn off the slowpath
2418 if slowpath:
2418 if slowpath:
2419 for path in match.files():
2419 for path in match.files():
2420 if path == '.' or path in repo.store:
2420 if path == '.' or path in repo.store:
2421 break
2421 break
2422 else:
2422 else:
2423 slowpath = False
2423 slowpath = False
2424
2424
2425 fpats = ('_patsfollow', '_patsfollowfirst')
2425 fpats = ('_patsfollow', '_patsfollowfirst')
2426 fnopats = (('_ancestors', '_fancestors'),
2426 fnopats = (('_ancestors', '_fancestors'),
2427 ('_descendants', '_fdescendants'))
2427 ('_descendants', '_fdescendants'))
2428 if slowpath:
2428 if slowpath:
2429 # See walkchangerevs() slow path.
2429 # See walkchangerevs() slow path.
2430 #
2430 #
2431 # pats/include/exclude cannot be represented as separate
2431 # pats/include/exclude cannot be represented as separate
2432 # revset expressions as their filtering logic applies at file
2432 # revset expressions as their filtering logic applies at file
2433 # level. For instance "-I a -X a" matches a revision touching
2433 # level. For instance "-I a -X a" matches a revision touching
2434 # "a" and "b" while "file(a) and not file(b)" does
2434 # "a" and "b" while "file(a) and not file(b)" does
2435 # not. Besides, filesets are evaluated against the working
2435 # not. Besides, filesets are evaluated against the working
2436 # directory.
2436 # directory.
2437 matchargs = ['r:', 'd:relpath']
2437 matchargs = ['r:', 'd:relpath']
2438 for p in pats:
2438 for p in pats:
2439 matchargs.append('p:' + p)
2439 matchargs.append('p:' + p)
2440 for p in opts.get('include', []):
2440 for p in opts.get('include', []):
2441 matchargs.append('i:' + p)
2441 matchargs.append('i:' + p)
2442 for p in opts.get('exclude', []):
2442 for p in opts.get('exclude', []):
2443 matchargs.append('x:' + p)
2443 matchargs.append('x:' + p)
2444 matchargs = ','.join(('%r' % p) for p in matchargs)
2444 matchargs = ','.join(('%r' % p) for p in matchargs)
2445 opts['_matchfiles'] = matchargs
2445 opts['_matchfiles'] = matchargs
2446 if follow:
2446 if follow:
2447 opts[fnopats[0][followfirst]] = '.'
2447 opts[fnopats[0][followfirst]] = '.'
2448 else:
2448 else:
2449 if follow:
2449 if follow:
2450 if pats:
2450 if pats:
2451 # follow() revset interprets its file argument as a
2451 # follow() revset interprets its file argument as a
2452 # manifest entry, so use match.files(), not pats.
2452 # manifest entry, so use match.files(), not pats.
2453 opts[fpats[followfirst]] = list(match.files())
2453 opts[fpats[followfirst]] = list(match.files())
2454 else:
2454 else:
2455 op = fnopats[followdescendants][followfirst]
2455 op = fnopats[followdescendants][followfirst]
2456 opts[op] = 'rev(%d)' % startrev
2456 opts[op] = 'rev(%d)' % startrev
2457 else:
2457 else:
2458 opts['_patslog'] = list(pats)
2458 opts['_patslog'] = list(pats)
2459
2459
2460 filematcher = None
2460 filematcher = None
2461 if opts.get('patch') or opts.get('stat'):
2461 if opts.get('patch') or opts.get('stat'):
2462 # When following files, track renames via a special matcher.
2462 # When following files, track renames via a special matcher.
2463 # If we're forced to take the slowpath it means we're following
2463 # If we're forced to take the slowpath it means we're following
2464 # at least one pattern/directory, so don't bother with rename tracking.
2464 # at least one pattern/directory, so don't bother with rename tracking.
2465 if follow and not match.always() and not slowpath:
2465 if follow and not match.always() and not slowpath:
2466 # _makefollowlogfilematcher expects its files argument to be
2466 # _makefollowlogfilematcher expects its files argument to be
2467 # relative to the repo root, so use match.files(), not pats.
2467 # relative to the repo root, so use match.files(), not pats.
2468 filematcher = _makefollowlogfilematcher(repo, match.files(),
2468 filematcher = _makefollowlogfilematcher(repo, match.files(),
2469 followfirst)
2469 followfirst)
2470 else:
2470 else:
2471 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2471 filematcher = _makenofollowlogfilematcher(repo, pats, opts)
2472 if filematcher is None:
2472 if filematcher is None:
2473 filematcher = lambda rev: match
2473 filematcher = lambda rev: match
2474
2474
2475 expr = []
2475 expr = []
2476 for op, val in sorted(opts.iteritems()):
2476 for op, val in sorted(opts.iteritems()):
2477 if not val:
2477 if not val:
2478 continue
2478 continue
2479 if op not in opt2revset:
2479 if op not in opt2revset:
2480 continue
2480 continue
2481 revop, andor = opt2revset[op]
2481 revop, andor = opt2revset[op]
2482 if '%(val)' not in revop:
2482 if '%(val)' not in revop:
2483 expr.append(revop)
2483 expr.append(revop)
2484 else:
2484 else:
2485 if not isinstance(val, list):
2485 if not isinstance(val, list):
2486 e = revop % {'val': val}
2486 e = revop % {'val': val}
2487 else:
2487 else:
2488 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2488 e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
2489 expr.append(e)
2489 expr.append(e)
2490
2490
2491 if expr:
2491 if expr:
2492 expr = '(' + ' and '.join(expr) + ')'
2492 expr = '(' + ' and '.join(expr) + ')'
2493 else:
2493 else:
2494 expr = None
2494 expr = None
2495 return expr, filematcher
2495 return expr, filematcher
2496
2496
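# Illustration only (hypothetical option values): the expression built above
# joins the sorted option clauses with ' and ' and list values with their
# per-option separator, roughly:
#
# opts = {'keyword': ['bug'], 'no_merges': True, 'user': ['alice', 'bob']}
# # expr ~ "((keyword('bug')) and not merge() and (user('alice') or user('bob')))"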
2497 def _logrevs(repo, opts):
2497 def _logrevs(repo, opts):
2498 # Default --rev value depends on --follow but --follow behavior
2498 # Default --rev value depends on --follow but --follow behavior
2499 # depends on revisions resolved from --rev...
2499 # depends on revisions resolved from --rev...
2500 follow = opts.get('follow') or opts.get('follow_first')
2500 follow = opts.get('follow') or opts.get('follow_first')
2501 if opts.get('rev'):
2501 if opts.get('rev'):
2502 revs = scmutil.revrange(repo, opts['rev'])
2502 revs = scmutil.revrange(repo, opts['rev'])
2503 elif follow and repo.dirstate.p1() == nullid:
2503 elif follow and repo.dirstate.p1() == nullid:
2504 revs = smartset.baseset()
2504 revs = smartset.baseset()
2505 elif follow:
2505 elif follow:
2506 revs = repo.revs('reverse(:.)')
2506 revs = repo.revs('reverse(:.)')
2507 else:
2507 else:
2508 revs = smartset.spanset(repo)
2508 revs = smartset.spanset(repo)
2509 revs.reverse()
2509 revs.reverse()
2510 return revs
2510 return revs
2511
2511
2512 def getgraphlogrevs(repo, pats, opts):
2512 def getgraphlogrevs(repo, pats, opts):
2513 """Return (revs, expr, filematcher) where revs is an iterable of
2513 """Return (revs, expr, filematcher) where revs is an iterable of
2514 revision numbers, expr is a revset string built from log options
2514 revision numbers, expr is a revset string built from log options
2515 and file patterns or None, and used to filter 'revs'. If --stat or
2515 and file patterns or None, and used to filter 'revs'. If --stat or
2516 --patch are not passed, filematcher is None. Otherwise it is a
2516 --patch are not passed, filematcher is None. Otherwise it is a
2517 callable taking a revision number and returning a match object
2517 callable taking a revision number and returning a match object
2518 filtering the files to be detailed when displaying the revision.
2518 filtering the files to be detailed when displaying the revision.
2519 """
2519 """
2520 limit = loglimit(opts)
2520 limit = loglimit(opts)
2521 revs = _logrevs(repo, opts)
2521 revs = _logrevs(repo, opts)
2522 if not revs:
2522 if not revs:
2523 return smartset.baseset(), None, None
2523 return smartset.baseset(), None, None
2524 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2524 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2525 if opts.get('rev'):
2525 if opts.get('rev'):
2526 # User-specified revs might be unsorted, but don't sort before
2526 # User-specified revs might be unsorted, but don't sort before
2527 # _makelogrevset because it might depend on the order of revs
2527 # _makelogrevset because it might depend on the order of revs
2528 if not (revs.isdescending() or revs.istopo()):
2528 if not (revs.isdescending() or revs.istopo()):
2529 revs.sort(reverse=True)
2529 revs.sort(reverse=True)
2530 if expr:
2530 if expr:
2531 matcher = revset.match(repo.ui, expr)
2531 matcher = revset.match(repo.ui, expr)
2532 revs = matcher(repo, revs)
2532 revs = matcher(repo, revs)
2533 if limit is not None:
2533 if limit is not None:
2534 limitedrevs = []
2534 limitedrevs = []
2535 for idx, rev in enumerate(revs):
2535 for idx, rev in enumerate(revs):
2536 if idx >= limit:
2536 if idx >= limit:
2537 break
2537 break
2538 limitedrevs.append(rev)
2538 limitedrevs.append(rev)
2539 revs = smartset.baseset(limitedrevs)
2539 revs = smartset.baseset(limitedrevs)
2540
2540
2541 return revs, expr, filematcher
2541 return revs, expr, filematcher
2542
2542
2543 def getlogrevs(repo, pats, opts):
2543 def getlogrevs(repo, pats, opts):
2544 """Return (revs, expr, filematcher) where revs is an iterable of
2544 """Return (revs, expr, filematcher) where revs is an iterable of
2545 revision numbers, expr is a revset string built from log options
2545 revision numbers, expr is a revset string built from log options
2546 and file patterns or None, and used to filter 'revs'. If --stat or
2546 and file patterns or None, and used to filter 'revs'. If --stat or
2547 --patch are not passed, filematcher is None. Otherwise it is a
2547 --patch are not passed, filematcher is None. Otherwise it is a
2548 callable taking a revision number and returning a match object
2548 callable taking a revision number and returning a match object
2549 filtering the files to be detailed when displaying the revision.
2549 filtering the files to be detailed when displaying the revision.
2550 """
2550 """
2551 limit = loglimit(opts)
2551 limit = loglimit(opts)
2552 revs = _logrevs(repo, opts)
2552 revs = _logrevs(repo, opts)
2553 if not revs:
2553 if not revs:
2554 return smartset.baseset([]), None, None
2554 return smartset.baseset([]), None, None
2555 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2555 expr, filematcher = _makelogrevset(repo, pats, opts, revs)
2556 if expr:
2556 if expr:
2557 matcher = revset.match(repo.ui, expr)
2557 matcher = revset.match(repo.ui, expr)
2558 revs = matcher(repo, revs)
2558 revs = matcher(repo, revs)
2559 if limit is not None:
2559 if limit is not None:
2560 limitedrevs = []
2560 limitedrevs = []
2561 for idx, r in enumerate(revs):
2561 for idx, r in enumerate(revs):
2562 if limit <= idx:
2562 if limit <= idx:
2563 break
2563 break
2564 limitedrevs.append(r)
2564 limitedrevs.append(r)
2565 revs = smartset.baseset(limitedrevs)
2565 revs = smartset.baseset(limitedrevs)
2566
2566
2567 return revs, expr, filematcher
2567 return revs, expr, filematcher
2568
2568
2569 def _parselinerangelogopt(repo, opts):
2569 def _parselinerangelogopt(repo, opts):
2570 """Parse --line-range log option and return a list of tuples (filename,
2570 """Parse --line-range log option and return a list of tuples (filename,
2571 (fromline, toline)).
2571 (fromline, toline)).
2572 """
2572 """
2573 linerangebyfname = []
2573 linerangebyfname = []
2574 for pat in opts.get('line_range', []):
2574 for pat in opts.get('line_range', []):
2575 try:
2575 try:
2576 pat, linerange = pat.rsplit(',', 1)
2576 pat, linerange = pat.rsplit(',', 1)
2577 except ValueError:
2577 except ValueError:
2578 raise error.Abort(_('malformed line-range pattern %s') % pat)
2578 raise error.Abort(_('malformed line-range pattern %s') % pat)
2579 try:
2579 try:
2580 fromline, toline = map(int, linerange.split(':'))
2580 fromline, toline = map(int, linerange.split(':'))
2581 except ValueError:
2581 except ValueError:
2582 raise error.Abort(_("invalid line range for %s") % pat)
2582 raise error.Abort(_("invalid line range for %s") % pat)
2583 msg = _("line range pattern '%s' must match exactly one file") % pat
2583 msg = _("line range pattern '%s' must match exactly one file") % pat
2584 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
2584 fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
2585 linerangebyfname.append(
2585 linerangebyfname.append(
2586 (fname, util.processlinerange(fromline, toline)))
2586 (fname, util.processlinerange(fromline, toline)))
2587 return linerangebyfname
2587 return linerangebyfname
2588
2588
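# Illustration only (hypothetical file name): the pattern format parsed above
# is "PATTERN,FROM:TO"; processlinerange() validates and normalizes the bounds.
#
# opts = {'line_range': ['mercurial/util.py,30:45']}
# _parselinerangelogopt(repo, opts)
# # -> [('mercurial/util.py', <(fromline, toline) pair from processlinerange>)]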
2589 def getloglinerangerevs(repo, userrevs, opts):
2589 def getloglinerangerevs(repo, userrevs, opts):
2590 """Return (revs, filematcher, hunksfilter).
2590 """Return (revs, filematcher, hunksfilter).
2591
2591
2592 "revs" are revisions obtained by processing "line-range" log options and
2592 "revs" are revisions obtained by processing "line-range" log options and
2593 walking block ancestors of each specified file/line-range.
2593 walking block ancestors of each specified file/line-range.
2594
2594
2595 "filematcher(rev) -> match" is a factory function returning a match object
2595 "filematcher(rev) -> match" is a factory function returning a match object
2596 for a given revision for file patterns specified in --line-range option.
2596 for a given revision for file patterns specified in --line-range option.
2597 If neither --stat nor --patch options are passed, "filematcher" is None.
2597 If neither --stat nor --patch options are passed, "filematcher" is None.
2598
2598
2599 "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
2599 "hunksfilter(rev) -> filterfn(fctx, hunks)" is a factory function
2600 returning a hunks filtering function.
2600 returning a hunks filtering function.
2601 If neither --stat nor --patch options are passed, "hunksfilter" is None.
2601 If neither --stat nor --patch options are passed, "hunksfilter" is None.
2602 """
2602 """
2603 wctx = repo[None]
2603 wctx = repo[None]
2604
2604
2605 # Two-level map of "rev -> file ctx -> [line range]".
2605 # Two-level map of "rev -> file ctx -> [line range]".
2606 linerangesbyrev = {}
2606 linerangesbyrev = {}
2607 for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
2607 for fname, (fromline, toline) in _parselinerangelogopt(repo, opts):
2608 if fname not in wctx:
2608 if fname not in wctx:
2609 raise error.Abort(_('cannot follow file not in parent '
2609 raise error.Abort(_('cannot follow file not in parent '
2610 'revision: "%s"') % fname)
2610 'revision: "%s"') % fname)
2611 fctx = wctx.filectx(fname)
2611 fctx = wctx.filectx(fname)
2612 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
2612 for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
2613 rev = fctx.introrev()
2613 rev = fctx.introrev()
2614 if rev not in userrevs:
2614 if rev not in userrevs:
2615 continue
2615 continue
2616 linerangesbyrev.setdefault(
2616 linerangesbyrev.setdefault(
2617 rev, {}).setdefault(
2617 rev, {}).setdefault(
2618 fctx.path(), []).append(linerange)
2618 fctx.path(), []).append(linerange)
2619
2619
2620 filematcher = None
2620 filematcher = None
2621 hunksfilter = None
2621 hunksfilter = None
2622 if opts.get('patch') or opts.get('stat'):
2622 if opts.get('patch') or opts.get('stat'):
2623
2623
2624 def nofilterhunksfn(fctx, hunks):
2624 def nofilterhunksfn(fctx, hunks):
2625 return hunks
2625 return hunks
2626
2626
2627 def hunksfilter(rev):
2627 def hunksfilter(rev):
2628 fctxlineranges = linerangesbyrev.get(rev)
2628 fctxlineranges = linerangesbyrev.get(rev)
2629 if fctxlineranges is None:
2629 if fctxlineranges is None:
2630 return nofilterhunksfn
2630 return nofilterhunksfn
2631
2631
2632 def filterfn(fctx, hunks):
2632 def filterfn(fctx, hunks):
2633 lineranges = fctxlineranges.get(fctx.path())
2633 lineranges = fctxlineranges.get(fctx.path())
2634 if lineranges is not None:
2634 if lineranges is not None:
2635 for hr, lines in hunks:
2635 for hr, lines in hunks:
2636 if hr is None: # binary
2636 if hr is None: # binary
2637 yield hr, lines
2637 yield hr, lines
2638 continue
2638 continue
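# hr holds the hunk range tuple (s1, l1, s2, l2); hr[2:] is the
# (start, length) span on the new side of the hunk, which is what
# gets tested against each requested line range below.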
2639 if any(mdiff.hunkinrange(hr[2:], lr)
2639 if any(mdiff.hunkinrange(hr[2:], lr)
2640 for lr in lineranges):
2640 for lr in lineranges):
2641 yield hr, lines
2641 yield hr, lines
2642 else:
2642 else:
2643 for hunk in hunks:
2643 for hunk in hunks:
2644 yield hunk
2644 yield hunk
2645
2645
2646 return filterfn
2646 return filterfn
2647
2647
2648 def filematcher(rev):
2648 def filematcher(rev):
2649 files = list(linerangesbyrev.get(rev, []))
2649 files = list(linerangesbyrev.get(rev, []))
2650 return scmutil.matchfiles(repo, files)
2650 return scmutil.matchfiles(repo, files)
2651
2651
2652 revs = sorted(linerangesbyrev, reverse=True)
2652 revs = sorted(linerangesbyrev, reverse=True)
2653
2653
2654 return revs, filematcher, hunksfilter
2654 return revs, filematcher, hunksfilter
2655
2655
2656 def _graphnodeformatter(ui, displayer):
2656 def _graphnodeformatter(ui, displayer):
2657 spec = ui.config('ui', 'graphnodetemplate')
2657 spec = ui.config('ui', 'graphnodetemplate')
2658 if not spec:
2658 if not spec:
2659 return templatekw.showgraphnode # fast path for "{graphnode}"
2659 return templatekw.showgraphnode # fast path for "{graphnode}"
2660
2660
2661 spec = templater.unquotestring(spec)
2661 spec = templater.unquotestring(spec)
2662 templ = formatter.maketemplater(ui, spec)
2662 templ = formatter.maketemplater(ui, spec)
2663 cache = {}
2663 cache = {}
2664 if isinstance(displayer, changeset_templater):
2664 if isinstance(displayer, changeset_templater):
2665 cache = displayer.cache # reuse cache of slow templates
2665 cache = displayer.cache # reuse cache of slow templates
2666 props = templatekw.keywords.copy()
2666 props = templatekw.keywords.copy()
2667 props['templ'] = templ
2667 props['templ'] = templ
2668 props['cache'] = cache
2668 props['cache'] = cache
2669 def formatnode(repo, ctx):
2669 def formatnode(repo, ctx):
2670 props['ctx'] = ctx
2670 props['ctx'] = ctx
2671 props['repo'] = repo
2671 props['repo'] = repo
2672 props['ui'] = repo.ui
2672 props['ui'] = repo.ui
2673 props['revcache'] = {}
2673 props['revcache'] = {}
2674 return templ.render(props)
2674 return templ.render(props)
2675 return formatnode
2675 return formatnode
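# Example (illustrative) configuration picked up by the code above, marking
# the working directory parent with "@" and every other node with "o":
#
#   [ui]
#   graphnodetemplate = {ifcontains(rev, revset('.'), '@', 'o')}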
2676
2676
2677 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2677 def displaygraph(ui, repo, dag, displayer, edgefn, getrenamed=None,
2678 filematcher=None, props=None):
2678 filematcher=None, props=None):
2679 props = props or {}
2679 props = props or {}
2680 formatnode = _graphnodeformatter(ui, displayer)
2680 formatnode = _graphnodeformatter(ui, displayer)
2681 state = graphmod.asciistate()
2681 state = graphmod.asciistate()
2682 styles = state['styles']
2682 styles = state['styles']
2683
2683
2684 # only set graph styling if HGPLAIN is not set.
2684 # only set graph styling if HGPLAIN is not set.
2685 if ui.plain('graph'):
2685 if ui.plain('graph'):
2686 # set all edge styles to |, the default pre-3.8 behaviour
2686 # set all edge styles to |, the default pre-3.8 behaviour
2687 styles.update(dict.fromkeys(styles, '|'))
2687 styles.update(dict.fromkeys(styles, '|'))
2688 else:
2688 else:
2689 edgetypes = {
2689 edgetypes = {
2690 'parent': graphmod.PARENT,
2690 'parent': graphmod.PARENT,
2691 'grandparent': graphmod.GRANDPARENT,
2691 'grandparent': graphmod.GRANDPARENT,
2692 'missing': graphmod.MISSINGPARENT
2692 'missing': graphmod.MISSINGPARENT
2693 }
2693 }
2694 for name, key in edgetypes.items():
2694 for name, key in edgetypes.items():
2695 # experimental config: experimental.graphstyle.*
2695 # experimental config: experimental.graphstyle.*
2696 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2696 styles[key] = ui.config('experimental', 'graphstyle.%s' % name,
2697 styles[key])
2697 styles[key])
2698 if not styles[key]:
2698 if not styles[key]:
2699 styles[key] = None
2699 styles[key] = None
2700
2700
2701 # experimental config: experimental.graphshorten
2701 # experimental config: experimental.graphshorten
2702 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
2702 state['graphshorten'] = ui.configbool('experimental', 'graphshorten')
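# Example (illustrative) hgrc values for the settings read above:
#
#   [experimental]
#   graphshorten = true
#   graphstyle.parent = |
#   graphstyle.grandparent = :
#   graphstyle.missing =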
2703
2703
2704 for rev, type, ctx, parents in dag:
2704 for rev, type, ctx, parents in dag:
2705 char = formatnode(repo, ctx)
2705 char = formatnode(repo, ctx)
2706 copies = None
2706 copies = None
2707 if getrenamed and ctx.rev():
2707 if getrenamed and ctx.rev():
2708 copies = []
2708 copies = []
2709 for fn in ctx.files():
2709 for fn in ctx.files():
2710 rename = getrenamed(fn, ctx.rev())
2710 rename = getrenamed(fn, ctx.rev())
2711 if rename:
2711 if rename:
2712 copies.append((fn, rename[0]))
2712 copies.append((fn, rename[0]))
2713 revmatchfn = None
2713 revmatchfn = None
2714 if filematcher is not None:
2714 if filematcher is not None:
2715 revmatchfn = filematcher(ctx.rev())
2715 revmatchfn = filematcher(ctx.rev())
2716 edges = edgefn(type, char, state, rev, parents)
2716 edges = edgefn(type, char, state, rev, parents)
2717 firstedge = next(edges)
2717 firstedge = next(edges)
2718 width = firstedge[2]
2718 width = firstedge[2]
2719 displayer.show(ctx, copies=copies, matchfn=revmatchfn,
2719 displayer.show(ctx, copies=copies, matchfn=revmatchfn,
2720 _graphwidth=width, **props)
2720 _graphwidth=width, **props)
2721 lines = displayer.hunk.pop(rev).split('\n')
2721 lines = displayer.hunk.pop(rev).split('\n')
2722 if not lines[-1]:
2722 if not lines[-1]:
2723 del lines[-1]
2723 del lines[-1]
2724 displayer.flush(ctx)
2724 displayer.flush(ctx)
2725 for type, char, width, coldata in itertools.chain([firstedge], edges):
2725 for type, char, width, coldata in itertools.chain([firstedge], edges):
2726 graphmod.ascii(ui, state, type, char, lines, coldata)
2726 graphmod.ascii(ui, state, type, char, lines, coldata)
2727 lines = []
2727 lines = []
2728 displayer.close()
2728 displayer.close()
2729
2729
2730 def graphlog(ui, repo, pats, opts):
2730 def graphlog(ui, repo, pats, opts):
2731 # Parameters are identical to log command ones
2731 # Parameters are identical to log command ones
2732 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2732 revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
2733 revdag = graphmod.dagwalker(repo, revs)
2733 revdag = graphmod.dagwalker(repo, revs)
2734
2734
2735 getrenamed = None
2735 getrenamed = None
2736 if opts.get('copies'):
2736 if opts.get('copies'):
2737 endrev = None
2737 endrev = None
2738 if opts.get('rev'):
2738 if opts.get('rev'):
2739 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2739 endrev = scmutil.revrange(repo, opts.get('rev')).max() + 1
2740 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2740 getrenamed = templatekw.getrenamedfn(repo, endrev=endrev)
2741
2741
2742 ui.pager('log')
2742 ui.pager('log')
2743 displayer = show_changeset(ui, repo, opts, buffered=True)
2743 displayer = show_changeset(ui, repo, opts, buffered=True)
2744 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2744 displaygraph(ui, repo, revdag, displayer, graphmod.asciiedges, getrenamed,
2745 filematcher)
2745 filematcher)
2746
2746
2747 def checkunsupportedgraphflags(pats, opts):
2747 def checkunsupportedgraphflags(pats, opts):
2748 for op in ["newest_first"]:
2748 for op in ["newest_first"]:
2749 if op in opts and opts[op]:
2749 if op in opts and opts[op]:
2750 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2750 raise error.Abort(_("-G/--graph option is incompatible with --%s")
2751 % op.replace("_", "-"))
2751 % op.replace("_", "-"))
2752
2752
2753 def graphrevs(repo, nodes, opts):
2753 def graphrevs(repo, nodes, opts):
2754 limit = loglimit(opts)
2754 limit = loglimit(opts)
2755 nodes.reverse()
2755 nodes.reverse()
2756 if limit is not None:
2756 if limit is not None:
2757 nodes = nodes[:limit]
2757 nodes = nodes[:limit]
2758 return graphmod.nodes(repo, nodes)
2758 return graphmod.nodes(repo, nodes)
2759
2759
2760 def add(ui, repo, match, prefix, explicitonly, **opts):
2760 def add(ui, repo, match, prefix, explicitonly, **opts):
2761 join = lambda f: os.path.join(prefix, f)
2761 join = lambda f: os.path.join(prefix, f)
2762 bad = []
2762 bad = []
2763
2763
2764 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2764 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2765 names = []
2765 names = []
2766 wctx = repo[None]
2766 wctx = repo[None]
2767 cca = None
2767 cca = None
2768 abort, warn = scmutil.checkportabilityalert(ui)
2768 abort, warn = scmutil.checkportabilityalert(ui)
2769 if abort or warn:
2769 if abort or warn:
2770 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2770 cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
2771
2771
2772 badmatch = matchmod.badmatch(match, badfn)
2772 badmatch = matchmod.badmatch(match, badfn)
2773 dirstate = repo.dirstate
2773 dirstate = repo.dirstate
2774 # We don't want to just call wctx.walk here, since it would return a lot of
2775 # clean files, which we aren't interested in and which take time to process.
2776 for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
2776 for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
2777 unknown=True, ignored=False, full=False)):
2777 unknown=True, ignored=False, full=False)):
2778 exact = match.exact(f)
2778 exact = match.exact(f)
2779 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2779 if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
2780 if cca:
2780 if cca:
2781 cca(f)
2781 cca(f)
2782 names.append(f)
2782 names.append(f)
2783 if ui.verbose or not exact:
2783 if ui.verbose or not exact:
2784 ui.status(_('adding %s\n') % match.rel(f))
2784 ui.status(_('adding %s\n') % match.rel(f))
2785
2785
2786 for subpath in sorted(wctx.substate):
2786 for subpath in sorted(wctx.substate):
2787 sub = wctx.sub(subpath)
2787 sub = wctx.sub(subpath)
2788 try:
2788 try:
2789 submatch = matchmod.subdirmatcher(subpath, match)
2789 submatch = matchmod.subdirmatcher(subpath, match)
2790 if opts.get(r'subrepos'):
2790 if opts.get(r'subrepos'):
2791 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2791 bad.extend(sub.add(ui, submatch, prefix, False, **opts))
2792 else:
2792 else:
2793 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2793 bad.extend(sub.add(ui, submatch, prefix, True, **opts))
2794 except error.LookupError:
2794 except error.LookupError:
2795 ui.status(_("skipping missing subrepository: %s\n")
2795 ui.status(_("skipping missing subrepository: %s\n")
2796 % join(subpath))
2796 % join(subpath))
2797
2797
2798 if not opts.get(r'dry_run'):
2798 if not opts.get(r'dry_run'):
2799 rejected = wctx.add(names, prefix)
2799 rejected = wctx.add(names, prefix)
2800 bad.extend(f for f in rejected if f in match.files())
2800 bad.extend(f for f in rejected if f in match.files())
2801 return bad
2801 return bad
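# (Note: "bad" lists the explicitly requested paths that could not be added;
# callers typically turn a non-empty list into a nonzero exit status.)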
2802
2802
2803 def addwebdirpath(repo, serverpath, webconf):
2803 def addwebdirpath(repo, serverpath, webconf):
2804 webconf[serverpath] = repo.root
2804 webconf[serverpath] = repo.root
2805 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2805 repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))
2806
2806
2807 for r in repo.revs('filelog("path:.hgsub")'):
2807 for r in repo.revs('filelog("path:.hgsub")'):
2808 ctx = repo[r]
2808 ctx = repo[r]
2809 for subpath in ctx.substate:
2809 for subpath in ctx.substate:
2810 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2810 ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2811
2811
2812 def forget(ui, repo, match, prefix, explicitonly):
2812 def forget(ui, repo, match, prefix, explicitonly):
2813 join = lambda f: os.path.join(prefix, f)
2813 join = lambda f: os.path.join(prefix, f)
2814 bad = []
2814 bad = []
2815 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2815 badfn = lambda x, y: bad.append(x) or match.bad(x, y)
2816 wctx = repo[None]
2816 wctx = repo[None]
2817 forgot = []
2817 forgot = []
2818
2818
2819 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2819 s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
2820 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2820 forget = sorted(s.modified + s.added + s.deleted + s.clean)
2821 if explicitonly:
2821 if explicitonly:
2822 forget = [f for f in forget if match.exact(f)]
2822 forget = [f for f in forget if match.exact(f)]
2823
2823
2824 for subpath in sorted(wctx.substate):
2824 for subpath in sorted(wctx.substate):
2825 sub = wctx.sub(subpath)
2825 sub = wctx.sub(subpath)
2826 try:
2826 try:
2827 submatch = matchmod.subdirmatcher(subpath, match)
2827 submatch = matchmod.subdirmatcher(subpath, match)
2828 subbad, subforgot = sub.forget(submatch, prefix)
2828 subbad, subforgot = sub.forget(submatch, prefix)
2829 bad.extend([subpath + '/' + f for f in subbad])
2829 bad.extend([subpath + '/' + f for f in subbad])
2830 forgot.extend([subpath + '/' + f for f in subforgot])
2830 forgot.extend([subpath + '/' + f for f in subforgot])
2831 except error.LookupError:
2831 except error.LookupError:
2832 ui.status(_("skipping missing subrepository: %s\n")
2832 ui.status(_("skipping missing subrepository: %s\n")
2833 % join(subpath))
2833 % join(subpath))
2834
2834
2835 if not explicitonly:
2835 if not explicitonly:
2836 for f in match.files():
2836 for f in match.files():
2837 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2837 if f not in repo.dirstate and not repo.wvfs.isdir(f):
2838 if f not in forgot:
2838 if f not in forgot:
2839 if repo.wvfs.exists(f):
2839 if repo.wvfs.exists(f):
2840 # Don't complain if the exact case match wasn't given.
2840 # Don't complain if the exact case match wasn't given.
2841 # But don't do this until after checking 'forgot', so
2841 # But don't do this until after checking 'forgot', so
2842 # that subrepo files aren't normalized, and this op is
2842 # that subrepo files aren't normalized, and this op is
2843 # purely from data cached by the status walk above.
2843 # purely from data cached by the status walk above.
2844 if repo.dirstate.normalize(f) in repo.dirstate:
2844 if repo.dirstate.normalize(f) in repo.dirstate:
2845 continue
2845 continue
2846 ui.warn(_('not removing %s: '
2846 ui.warn(_('not removing %s: '
2847 'file is already untracked\n')
2847 'file is already untracked\n')
2848 % match.rel(f))
2848 % match.rel(f))
2849 bad.append(f)
2849 bad.append(f)
2850
2850
2851 for f in forget:
2851 for f in forget:
2852 if ui.verbose or not match.exact(f):
2852 if ui.verbose or not match.exact(f):
2853 ui.status(_('removing %s\n') % match.rel(f))
2853 ui.status(_('removing %s\n') % match.rel(f))
2854
2854
2855 rejected = wctx.forget(forget, prefix)
2855 rejected = wctx.forget(forget, prefix)
2856 bad.extend(f for f in rejected if f in match.files())
2856 bad.extend(f for f in rejected if f in match.files())
2857 forgot.extend(f for f in forget if f not in rejected)
2857 forgot.extend(f for f in forget if f not in rejected)
2858 return bad, forgot
2858 return bad, forgot
2859
2859
2860 def files(ui, ctx, m, fm, fmt, subrepos):
2860 def files(ui, ctx, m, fm, fmt, subrepos):
2861 rev = ctx.rev()
2861 rev = ctx.rev()
2862 ret = 1
2862 ret = 1
2863 ds = ctx.repo().dirstate
2863 ds = ctx.repo().dirstate
2864
2864
2865 for f in ctx.matches(m):
2865 for f in ctx.matches(m):
2866 if rev is None and ds[f] == 'r':
2866 if rev is None and ds[f] == 'r':
2867 continue
2867 continue
2868 fm.startitem()
2868 fm.startitem()
2869 if ui.verbose:
2869 if ui.verbose:
2870 fc = ctx[f]
2870 fc = ctx[f]
2871 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2871 fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
2872 fm.data(abspath=f)
2872 fm.data(abspath=f)
2873 fm.write('path', fmt, m.rel(f))
2873 fm.write('path', fmt, m.rel(f))
2874 ret = 0
2874 ret = 0
2875
2875
2876 for subpath in sorted(ctx.substate):
2876 for subpath in sorted(ctx.substate):
2877 submatch = matchmod.subdirmatcher(subpath, m)
2877 submatch = matchmod.subdirmatcher(subpath, m)
2878 if (subrepos or m.exact(subpath) or any(submatch.files())):
2878 if (subrepos or m.exact(subpath) or any(submatch.files())):
2879 sub = ctx.sub(subpath)
2879 sub = ctx.sub(subpath)
2880 try:
2880 try:
2881 recurse = m.exact(subpath) or subrepos
2881 recurse = m.exact(subpath) or subrepos
2882 if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
2882 if sub.printfiles(ui, submatch, fm, fmt, recurse) == 0:
2883 ret = 0
2883 ret = 0
2884 except error.LookupError:
2884 except error.LookupError:
2885 ui.status(_("skipping missing subrepository: %s\n")
2885 ui.status(_("skipping missing subrepository: %s\n")
2886 % m.abs(subpath))
2886 % m.abs(subpath))
2887
2887
2888 return ret
2888 return ret
2889
2889
2890 def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
2890 def remove(ui, repo, m, prefix, after, force, subrepos, warnings=None):
2891 join = lambda f: os.path.join(prefix, f)
2891 join = lambda f: os.path.join(prefix, f)
2892 ret = 0
2892 ret = 0
2893 s = repo.status(match=m, clean=True)
2893 s = repo.status(match=m, clean=True)
2894 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2894 modified, added, deleted, clean = s[0], s[1], s[3], s[6]
2895
2895
2896 wctx = repo[None]
2896 wctx = repo[None]
2897
2897
2898 if warnings is None:
2898 if warnings is None:
2899 warnings = []
2899 warnings = []
2900 warn = True
2900 warn = True
2901 else:
2901 else:
2902 warn = False
2902 warn = False
2903
2903
2904 subs = sorted(wctx.substate)
2904 subs = sorted(wctx.substate)
2905 total = len(subs)
2905 total = len(subs)
2906 count = 0
2906 count = 0
2907 for subpath in subs:
2907 for subpath in subs:
2908 count += 1
2908 count += 1
2909 submatch = matchmod.subdirmatcher(subpath, m)
2909 submatch = matchmod.subdirmatcher(subpath, m)
2910 if subrepos or m.exact(subpath) or any(submatch.files()):
2910 if subrepos or m.exact(subpath) or any(submatch.files()):
2911 ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
2911 ui.progress(_('searching'), count, total=total, unit=_('subrepos'))
2912 sub = wctx.sub(subpath)
2912 sub = wctx.sub(subpath)
2913 try:
2913 try:
2914 if sub.removefiles(submatch, prefix, after, force, subrepos,
2914 if sub.removefiles(submatch, prefix, after, force, subrepos,
2915 warnings):
2915 warnings):
2916 ret = 1
2916 ret = 1
2917 except error.LookupError:
2917 except error.LookupError:
2918 warnings.append(_("skipping missing subrepository: %s\n")
2918 warnings.append(_("skipping missing subrepository: %s\n")
2919 % join(subpath))
2919 % join(subpath))
2920 ui.progress(_('searching'), None)
2920 ui.progress(_('searching'), None)
2921
2921
2922 # warn about failure to delete explicit files/dirs
2922 # warn about failure to delete explicit files/dirs
2923 deleteddirs = util.dirs(deleted)
2923 deleteddirs = util.dirs(deleted)
2924 files = m.files()
2924 files = m.files()
2925 total = len(files)
2925 total = len(files)
2926 count = 0
2926 count = 0
2927 for f in files:
2927 for f in files:
2928 def insubrepo():
2928 def insubrepo():
2929 for subpath in wctx.substate:
2929 for subpath in wctx.substate:
2930 if f.startswith(subpath + '/'):
2930 if f.startswith(subpath + '/'):
2931 return True
2931 return True
2932 return False
2932 return False
2933
2933
2934 count += 1
2934 count += 1
2935 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2935 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2936 isdir = f in deleteddirs or wctx.hasdir(f)
2936 isdir = f in deleteddirs or wctx.hasdir(f)
2937 if (f in repo.dirstate or isdir or f == '.'
2937 if (f in repo.dirstate or isdir or f == '.'
2938 or insubrepo() or f in subs):
2938 or insubrepo() or f in subs):
2939 continue
2939 continue
2940
2940
2941 if repo.wvfs.exists(f):
2941 if repo.wvfs.exists(f):
2942 if repo.wvfs.isdir(f):
2942 if repo.wvfs.isdir(f):
2943 warnings.append(_('not removing %s: no tracked files\n')
2943 warnings.append(_('not removing %s: no tracked files\n')
2944 % m.rel(f))
2944 % m.rel(f))
2945 else:
2945 else:
2946 warnings.append(_('not removing %s: file is untracked\n')
2946 warnings.append(_('not removing %s: file is untracked\n')
2947 % m.rel(f))
2947 % m.rel(f))
2948 # missing files will generate a warning elsewhere
2948 # missing files will generate a warning elsewhere
2949 ret = 1
2949 ret = 1
2950 ui.progress(_('deleting'), None)
2950 ui.progress(_('deleting'), None)
2951
2951
2952 if force:
2952 if force:
2953 list = modified + deleted + clean + added
2953 list = modified + deleted + clean + added
2954 elif after:
2954 elif after:
2955 list = deleted
2955 list = deleted
2956 remaining = modified + added + clean
2956 remaining = modified + added + clean
2957 total = len(remaining)
2957 total = len(remaining)
2958 count = 0
2958 count = 0
2959 for f in remaining:
2959 for f in remaining:
2960 count += 1
2960 count += 1
2961 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2961 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2962 if ui.verbose or (f in files):
2962 if ui.verbose or (f in files):
2963 warnings.append(_('not removing %s: file still exists\n')
2963 warnings.append(_('not removing %s: file still exists\n')
2964 % m.rel(f))
2964 % m.rel(f))
2965 ret = 1
2965 ret = 1
2966 ui.progress(_('skipping'), None)
2966 ui.progress(_('skipping'), None)
2967 else:
2967 else:
2968 list = deleted + clean
2968 list = deleted + clean
2969 total = len(modified) + len(added)
2969 total = len(modified) + len(added)
2970 count = 0
2970 count = 0
2971 for f in modified:
2971 for f in modified:
2972 count += 1
2972 count += 1
2973 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2973 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2974 warnings.append(_('not removing %s: file is modified (use -f'
2974 warnings.append(_('not removing %s: file is modified (use -f'
2975 ' to force removal)\n') % m.rel(f))
2975 ' to force removal)\n') % m.rel(f))
2976 ret = 1
2976 ret = 1
2977 for f in added:
2977 for f in added:
2978 count += 1
2978 count += 1
2979 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2979 ui.progress(_('skipping'), count, total=total, unit=_('files'))
2980 warnings.append(_("not removing %s: file has been marked for add"
2980 warnings.append(_("not removing %s: file has been marked for add"
2981 " (use 'hg forget' to undo add)\n") % m.rel(f))
2981 " (use 'hg forget' to undo add)\n") % m.rel(f))
2982 ret = 1
2982 ret = 1
2983 ui.progress(_('skipping'), None)
2983 ui.progress(_('skipping'), None)
2984
2984
2985 list = sorted(list)
2985 list = sorted(list)
2986 total = len(list)
2986 total = len(list)
2987 count = 0
2987 count = 0
2988 for f in list:
2988 for f in list:
2989 count += 1
2989 count += 1
2990 if ui.verbose or not m.exact(f):
2990 if ui.verbose or not m.exact(f):
2991 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2991 ui.progress(_('deleting'), count, total=total, unit=_('files'))
2992 ui.status(_('removing %s\n') % m.rel(f))
2992 ui.status(_('removing %s\n') % m.rel(f))
2993 ui.progress(_('deleting'), None)
2993 ui.progress(_('deleting'), None)
2994
2994
2995 with repo.wlock():
2995 with repo.wlock():
2996 if not after:
2996 if not after:
2997 for f in list:
2997 for f in list:
2998 if f in added:
2998 if f in added:
2999 continue # we never unlink added files on remove
2999 continue # we never unlink added files on remove
3000 repo.wvfs.unlinkpath(f, ignoremissing=True)
3000 repo.wvfs.unlinkpath(f, ignoremissing=True)
3001 repo[None].forget(list)
3001 repo[None].forget(list)
3002
3002
3003 if warn:
3003 if warn:
3004 for warning in warnings:
3004 for warning in warnings:
3005 ui.warn(warning)
3005 ui.warn(warning)
3006
3006
3007 return ret
3007 return ret
3008
3008
3009 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
3009 def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
3010 err = 1
3010 err = 1
3011
3011
3012 def write(path):
3012 def write(path):
3013 filename = None
3013 filename = None
3014 if fntemplate:
3014 if fntemplate:
3015 filename = makefilename(repo, fntemplate, ctx.node(),
3015 filename = makefilename(repo, fntemplate, ctx.node(),
3016 pathname=os.path.join(prefix, path))
3016 pathname=os.path.join(prefix, path))
3017 # attempt to create the directory if it does not already exist
3017 # attempt to create the directory if it does not already exist
3018 try:
3018 try:
3019 os.makedirs(os.path.dirname(filename))
3019 os.makedirs(os.path.dirname(filename))
3020 except OSError:
3020 except OSError:
3021 pass
3021 pass
3022 with formatter.maybereopen(basefm, filename, opts) as fm:
3022 with formatter.maybereopen(basefm, filename, opts) as fm:
3023 data = ctx[path].data()
3023 data = ctx[path].data()
3024 if opts.get('decode'):
3024 if opts.get('decode'):
3025 data = repo.wwritedata(path, data)
3025 data = repo.wwritedata(path, data)
3026 fm.startitem()
3026 fm.startitem()
3027 fm.write('data', '%s', data)
3027 fm.write('data', '%s', data)
3028 fm.data(abspath=path, path=matcher.rel(path))
3028 fm.data(abspath=path, path=matcher.rel(path))
3029
3029
3030 # Automation often uses hg cat on single files, so special case it
3030 # Automation often uses hg cat on single files, so special case it
3031 # for performance to avoid the cost of parsing the manifest.
3031 # for performance to avoid the cost of parsing the manifest.
3032 if len(matcher.files()) == 1 and not matcher.anypats():
3032 if len(matcher.files()) == 1 and not matcher.anypats():
3033 file = matcher.files()[0]
3033 file = matcher.files()[0]
3034 mfl = repo.manifestlog
3034 mfl = repo.manifestlog
3035 mfnode = ctx.manifestnode()
3035 mfnode = ctx.manifestnode()
3036 try:
3036 try:
3037 if mfnode and mfl[mfnode].find(file)[0]:
3037 if mfnode and mfl[mfnode].find(file)[0]:
3038 write(file)
3038 write(file)
3039 return 0
3039 return 0
3040 except KeyError:
3040 except KeyError:
3041 pass
3041 pass
3042
3042
3043 for abs in ctx.walk(matcher):
3043 for abs in ctx.walk(matcher):
3044 write(abs)
3044 write(abs)
3045 err = 0
3045 err = 0
3046
3046
3047 for subpath in sorted(ctx.substate):
3047 for subpath in sorted(ctx.substate):
3048 sub = ctx.sub(subpath)
3048 sub = ctx.sub(subpath)
3049 try:
3049 try:
3050 submatch = matchmod.subdirmatcher(subpath, matcher)
3050 submatch = matchmod.subdirmatcher(subpath, matcher)
3051
3051
3052 if not sub.cat(submatch, basefm, fntemplate,
3052 if not sub.cat(submatch, basefm, fntemplate,
3053 os.path.join(prefix, sub._path), **opts):
3053 os.path.join(prefix, sub._path), **opts):
3054 err = 0
3054 err = 0
3055 except error.RepoLookupError:
3055 except error.RepoLookupError:
3056 ui.status(_("skipping missing subrepository: %s\n")
3056 ui.status(_("skipping missing subrepository: %s\n")
3057 % os.path.join(prefix, subpath))
3057 % os.path.join(prefix, subpath))
3058
3058
3059 return err
3059 return err
3060
3060
3061 def commit(ui, repo, commitfunc, pats, opts):
3061 def commit(ui, repo, commitfunc, pats, opts):
3062 '''commit the specified files or all outstanding changes'''
3062 '''commit the specified files or all outstanding changes'''
3063 date = opts.get('date')
3063 date = opts.get('date')
3064 if date:
3064 if date:
3065 opts['date'] = util.parsedate(date)
3065 opts['date'] = util.parsedate(date)
3066 message = logmessage(ui, opts)
3066 message = logmessage(ui, opts)
3067 matcher = scmutil.match(repo[None], pats, opts)
3067 matcher = scmutil.match(repo[None], pats, opts)
3068
3068
3069 dsguard = None
3069 dsguard = None
3070 # extract addremove carefully -- this function can be called from a command
3070 # extract addremove carefully -- this function can be called from a command
3071 # that doesn't support addremove
3071 # that doesn't support addremove
3072 if opts.get('addremove'):
3072 if opts.get('addremove'):
3073 dsguard = dirstateguard.dirstateguard(repo, 'commit')
3073 dsguard = dirstateguard.dirstateguard(repo, 'commit')
3074 with dsguard or util.nullcontextmanager():
3074 with dsguard or util.nullcontextmanager():
3075 if dsguard:
3075 if dsguard:
3076 if scmutil.addremove(repo, matcher, "", opts) != 0:
3076 if scmutil.addremove(repo, matcher, "", opts) != 0:
3077 raise error.Abort(
3077 raise error.Abort(
3078 _("failed to mark all new/missing files as added/removed"))
3078 _("failed to mark all new/missing files as added/removed"))
3079
3079
3080 return commitfunc(ui, repo, message, matcher, opts)
3080 return commitfunc(ui, repo, message, matcher, opts)
3081
3081
3082 def samefile(f, ctx1, ctx2):
3082 def samefile(f, ctx1, ctx2):
3083 if f in ctx1.manifest():
3083 if f in ctx1.manifest():
3084 a = ctx1.filectx(f)
3084 a = ctx1.filectx(f)
3085 if f in ctx2.manifest():
3085 if f in ctx2.manifest():
3086 b = ctx2.filectx(f)
3086 b = ctx2.filectx(f)
3087 return (not a.cmp(b)
3087 return (not a.cmp(b)
3088 and a.flags() == b.flags())
3088 and a.flags() == b.flags())
3089 else:
3089 else:
3090 return False
3090 return False
3091 else:
3091 else:
3092 return f not in ctx2.manifest()
3092 return f not in ctx2.manifest()
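# Note: a file missing from ctx1 counts as "same" only when it is missing
# from ctx2 as well; presence in exactly one of the two contexts is always
# treated as a difference.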
3093
3093
3094 def amend(ui, repo, old, extra, pats, opts):
3094 def amend(ui, repo, old, extra, pats, opts):
3095 # avoid cycle context -> subrepo -> cmdutil
3095 # avoid cycle context -> subrepo -> cmdutil
3096 from . import context
3096 from . import context
3097
3097
3098 # amend will reuse the existing user if not specified, but the obsolete
3098 # amend will reuse the existing user if not specified, but the obsolete
3099 # marker creation requires that the current user's name is specified.
3099 # marker creation requires that the current user's name is specified.
3100 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3100 if obsolete.isenabled(repo, obsolete.createmarkersopt):
3101 ui.username() # raise exception if username not set
3101 ui.username() # raise exception if username not set
3102
3102
3103 ui.note(_('amending changeset %s\n') % old)
3103 ui.note(_('amending changeset %s\n') % old)
3104 base = old.p1()
3104 base = old.p1()
3105
3105
3106 with repo.wlock(), repo.lock(), repo.transaction('amend'):
3106 with repo.wlock(), repo.lock(), repo.transaction('amend'):
3107 # Participating changesets:
3107 # Participating changesets:
3108 #
3108 #
3109 # wctx o - workingctx that contains changes from working copy
3109 # wctx o - workingctx that contains changes from working copy
3110 # | to go into amending commit
3110 # | to go into amending commit
3111 # |
3111 # |
3112 # old o - changeset to amend
3112 # old o - changeset to amend
3113 # |
3113 # |
3114 # base o - first parent of the changeset to amend
3114 # base o - first parent of the changeset to amend
3115 wctx = repo[None]
3115 wctx = repo[None]
3116
3116
3117 # Copy to avoid mutating input
3117 # Copy to avoid mutating input
3118 extra = extra.copy()
3118 extra = extra.copy()
3119 # Update extra dict from amended commit (e.g. to preserve graft
3119 # Update extra dict from amended commit (e.g. to preserve graft
3120 # source)
3120 # source)
3121 extra.update(old.extra())
3121 extra.update(old.extra())
3122
3122
3123 # Also update it from the wctx
3124 extra.update(wctx.extra())
3124 extra.update(wctx.extra())
3125
3125
3126 user = opts.get('user') or old.user()
3126 user = opts.get('user') or old.user()
3127 date = opts.get('date') or old.date()
3127 date = opts.get('date') or old.date()
3128
3128
3129 # Parse the date to allow comparison between date and old.date()
3129 # Parse the date to allow comparison between date and old.date()
3130 date = util.parsedate(date)
3130 date = util.parsedate(date)
3131
3131
3132 if len(old.parents()) > 1:
3132 if len(old.parents()) > 1:
3133 # ctx.files() isn't reliable for merges, so fall back to the
3133 # ctx.files() isn't reliable for merges, so fall back to the
3134 # slower repo.status() method
3134 # slower repo.status() method
3135 files = set([fn for st in repo.status(base, old)[:3]
3135 files = set([fn for st in repo.status(base, old)[:3]
3136 for fn in st])
3136 for fn in st])
3137 else:
3137 else:
3138 files = set(old.files())
3138 files = set(old.files())
3139
3139
3140 # add/remove the files to the working copy if the "addremove" option
3140 # add/remove the files to the working copy if the "addremove" option
3141 # was specified.
3141 # was specified.
3142 matcher = scmutil.match(wctx, pats, opts)
3142 matcher = scmutil.match(wctx, pats, opts)
3143 if (opts.get('addremove')
3143 if (opts.get('addremove')
3144 and scmutil.addremove(repo, matcher, "", opts)):
3144 and scmutil.addremove(repo, matcher, "", opts)):
3145 raise error.Abort(
3145 raise error.Abort(
3146 _("failed to mark all new/missing files as added/removed"))
3146 _("failed to mark all new/missing files as added/removed"))
3147
3147
3148 # Check subrepos. This depends on in-place wctx._status update in
3148 # Check subrepos. This depends on in-place wctx._status update in
3149 # subrepo.precommit(). To minimize the risk of this hack, we do
3149 # subrepo.precommit(). To minimize the risk of this hack, we do
3150 # nothing if .hgsub does not exist.
3150 # nothing if .hgsub does not exist.
3151 if '.hgsub' in wctx or '.hgsub' in old:
3151 if '.hgsub' in wctx or '.hgsub' in old:
3152 from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil
3152 from . import subrepo # avoid cycle: cmdutil -> subrepo -> cmdutil
3153 subs, commitsubs, newsubstate = subrepo.precommit(
3153 subs, commitsubs, newsubstate = subrepo.precommit(
3154 ui, wctx, wctx._status, matcher)
3154 ui, wctx, wctx._status, matcher)
3155 # amend should abort if commitsubrepos is enabled
3155 # amend should abort if commitsubrepos is enabled
3156 assert not commitsubs
3156 assert not commitsubs
3157 if subs:
3157 if subs:
3158 subrepo.writestate(repo, newsubstate)
3158 subrepo.writestate(repo, newsubstate)
3159
3159
3160 filestoamend = set(f for f in wctx.files() if matcher(f))
3160 filestoamend = set(f for f in wctx.files() if matcher(f))
3161
3161
3162 changes = (len(filestoamend) > 0)
3162 changes = (len(filestoamend) > 0)
3163 if changes:
3163 if changes:
3164 # Recompute copies (avoid recording a -> b -> a)
3164 # Recompute copies (avoid recording a -> b -> a)
3165 copied = copies.pathcopies(base, wctx, matcher)
3165 copied = copies.pathcopies(base, wctx, matcher)
3166 if old.p2():  # p2 is a method; only merge copies from a real second parent
3167 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3167 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
3168
3168
3169 # Prune files which were reverted by the updates: if old
3169 # Prune files which were reverted by the updates: if old
3170 # introduced file X and the file was renamed in the working
3170 # introduced file X and the file was renamed in the working
3171 # copy, then those two files are the same and
3171 # copy, then those two files are the same and
3172 # we can discard X from our list of files. Likewise if X
3172 # we can discard X from our list of files. Likewise if X
3173 # was removed, it's no longer relevant. If X is missing (aka
3173 # was removed, it's no longer relevant. If X is missing (aka
3174 # deleted), old X must be preserved.
3174 # deleted), old X must be preserved.
3175 files.update(filestoamend)
3175 files.update(filestoamend)
3176 files = [f for f in files if (not samefile(f, wctx, base)
3176 files = [f for f in files if (not samefile(f, wctx, base)
3177 or f in wctx.deleted())]
3177 or f in wctx.deleted())]
3178
3178
3179 def filectxfn(repo, ctx_, path):
3179 def filectxfn(repo, ctx_, path):
3180 try:
3180 try:
3181 # If the file being considered is not amongst the files
3181 # If the file being considered is not amongst the files
3182 # to be amended, we should return the file context from the
3182 # to be amended, we should return the file context from the
3183 # old changeset. This avoids issues when only some files in
3183 # old changeset. This avoids issues when only some files in
3184 # the working copy are being amended but there are also
3184 # the working copy are being amended but there are also
3185 # changes to other files from the old changeset.
3185 # changes to other files from the old changeset.
3186 if path not in filestoamend:
3186 if path not in filestoamend:
3187 return old.filectx(path)
3187 return old.filectx(path)
3188
3188
3189 # Return None for removed files.
3189 # Return None for removed files.
3190 if path in wctx.removed():
3190 if path in wctx.removed():
3191 return None
3191 return None
3192
3192
3193 fctx = wctx[path]
3193 fctx = wctx[path]
3194 flags = fctx.flags()
3194 flags = fctx.flags()
3195 mctx = context.memfilectx(repo,
3195 mctx = context.memfilectx(repo,
3196 fctx.path(), fctx.data(),
3196 fctx.path(), fctx.data(),
3197 islink='l' in flags,
3197 islink='l' in flags,
3198 isexec='x' in flags,
3198 isexec='x' in flags,
3199 copied=copied.get(path))
3199 copied=copied.get(path))
3200 return mctx
3200 return mctx
3201 except KeyError:
3201 except KeyError:
3202 return None
3202 return None
3203 else:
3203 else:
3204 ui.note(_('copying changeset %s to %s\n') % (old, base))
3204 ui.note(_('copying changeset %s to %s\n') % (old, base))
3205
3205
3206 # Use version of files as in the old cset
3206 # Use version of files as in the old cset
3207 def filectxfn(repo, ctx_, path):
3207 def filectxfn(repo, ctx_, path):
3208 try:
3208 try:
3209 return old.filectx(path)
3209 return old.filectx(path)
3210 except KeyError:
3210 except KeyError:
3211 return None
3211 return None
3212
3212
3213 # See if we got a message from -m or -l; if not, open the editor with
3214 # the message of the changeset to amend.
3215 message = logmessage(ui, opts)
3215 message = logmessage(ui, opts)
3216
3216
3217 editform = mergeeditform(old, 'commit.amend')
3217 editform = mergeeditform(old, 'commit.amend')
3218 editor = getcommiteditor(editform=editform,
3218 editor = getcommiteditor(editform=editform,
3219 **pycompat.strkwargs(opts))
3219 **pycompat.strkwargs(opts))
3220
3220
3221 if not message:
3221 if not message:
3222 editor = getcommiteditor(edit=True, editform=editform)
3222 editor = getcommiteditor(edit=True, editform=editform)
3223 message = old.description()
3223 message = old.description()
3224
3224
3225 pureextra = extra.copy()
3225 pureextra = extra.copy()
3226 extra['amend_source'] = old.hex()
3226 extra['amend_source'] = old.hex()
3227
3227
3228 new = context.memctx(repo,
3228 new = context.memctx(repo,
3229 parents=[base.node(), old.p2().node()],
3229 parents=[base.node(), old.p2().node()],
3230 text=message,
3230 text=message,
3231 files=files,
3231 files=files,
3232 filectxfn=filectxfn,
3232 filectxfn=filectxfn,
3233 user=user,
3233 user=user,
3234 date=date,
3234 date=date,
3235 extra=extra,
3235 extra=extra,
3236 editor=editor)
3236 editor=editor)
3237
3237
3238 newdesc = changelog.stripdesc(new.description())
3238 newdesc = changelog.stripdesc(new.description())
3239 if ((not changes)
3239 if ((not changes)
3240 and newdesc == old.description()
3240 and newdesc == old.description()
3241 and user == old.user()
3241 and user == old.user()
3242 and date == old.date()
3242 and date == old.date()
3243 and pureextra == old.extra()):
3243 and pureextra == old.extra()):
3244 # nothing changed. continuing here would create a new node
3244 # nothing changed. continuing here would create a new node
3245 # anyway because of the amend_source noise.
3245 # anyway because of the amend_source noise.
3246 #
3246 #
3247 # This is not what we expect from amend.
3248 return old.node()
3248 return old.node()
3249
3249
3250 if opts.get('secret'):
3250 if opts.get('secret'):
3251 commitphase = 'secret'
3251 commitphase = 'secret'
3252 else:
3252 else:
3253 commitphase = old.phase()
3253 commitphase = old.phase()
3254 overrides = {('phases', 'new-commit'): commitphase}
3254 overrides = {('phases', 'new-commit'): commitphase}
3255 with ui.configoverride(overrides, 'amend'):
3255 with ui.configoverride(overrides, 'amend'):
3256 newid = repo.commitctx(new)
3256 newid = repo.commitctx(new)
3257
3257
3258 # Reroute the working copy parent to the new changeset
3258 # Reroute the working copy parent to the new changeset
3259 repo.setparents(newid, nullid)
3259 repo.setparents(newid, nullid)
3260 mapping = {old.node(): (newid,)}
3260 mapping = {old.node(): (newid,)}
3261 obsmetadata = None
3261 obsmetadata = None
3262 if opts.get('note'):
3262 if opts.get('note'):
3263 obsmetadata = {'note': opts['note']}
3263 obsmetadata = {'note': opts['note']}
3264 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
3264 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata)
3265
3265
3266 # Fixing the dirstate because localrepo.commitctx does not update
3266 # Fixing the dirstate because localrepo.commitctx does not update
3267 # it. This is rather convenient because we did not need to update
3267 # it. This is rather convenient because we did not need to update
3268 # the dirstate for all the files in the new commit which commitctx
3268 # the dirstate for all the files in the new commit which commitctx
3269 # could have done if it updated the dirstate. Now, we can
3269 # could have done if it updated the dirstate. Now, we can
3270 # selectively update the dirstate only for the amended files.
3270 # selectively update the dirstate only for the amended files.
3271 dirstate = repo.dirstate
3271 dirstate = repo.dirstate
3272
3272
3273 # Update the state of the files which were added and
3274 # modified in the amend to "normal" in the dirstate.
3275 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3275 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
3276 for f in normalfiles:
3276 for f in normalfiles:
3277 dirstate.normal(f)
3277 dirstate.normal(f)
3278
3278
3279 # Update the state of files which were removed in the amend
3279 # Update the state of files which were removed in the amend
3280 # to "removed" in the dirstate.
3280 # to "removed" in the dirstate.
3281 removedfiles = set(wctx.removed()) & filestoamend
3281 removedfiles = set(wctx.removed()) & filestoamend
3282 for f in removedfiles:
3282 for f in removedfiles:
3283 dirstate.drop(f)
3283 dirstate.drop(f)
3284
3284
3285 return newid
3285 return newid
3286
3286
3287 def commiteditor(repo, ctx, subs, editform=''):
3287 def commiteditor(repo, ctx, subs, editform=''):
3288 if ctx.description():
3288 if ctx.description():
3289 return ctx.description()
3289 return ctx.description()
3290 return commitforceeditor(repo, ctx, subs, editform=editform,
3290 return commitforceeditor(repo, ctx, subs, editform=editform,
3291 unchangedmessagedetection=True)
3291 unchangedmessagedetection=True)
3292
3292
3293 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
3293 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
3294 editform='', unchangedmessagedetection=False):
3294 editform='', unchangedmessagedetection=False):
3295 if not extramsg:
3295 if not extramsg:
3296 extramsg = _("Leave message empty to abort commit.")
3296 extramsg = _("Leave message empty to abort commit.")
3297
3297
3298 forms = [e for e in editform.split('.') if e]
3298 forms = [e for e in editform.split('.') if e]
3299 forms.insert(0, 'changeset')
3299 forms.insert(0, 'changeset')
3300 templatetext = None
3300 templatetext = None
3301 while forms:
3301 while forms:
3302 ref = '.'.join(forms)
3302 ref = '.'.join(forms)
3303 if repo.ui.config('committemplate', ref):
3303 if repo.ui.config('committemplate', ref):
3304 templatetext = committext = buildcommittemplate(
3304 templatetext = committext = buildcommittemplate(
3305 repo, ctx, subs, extramsg, ref)
3305 repo, ctx, subs, extramsg, ref)
3306 break
3306 break
3307 forms.pop()
3307 forms.pop()
3308 else:
3308 else:
3309 committext = buildcommittext(repo, ctx, subs, extramsg)
3309 committext = buildcommittext(repo, ctx, subs, extramsg)
3310
3310
3311 # run editor in the repository root
3311 # run editor in the repository root
3312 olddir = pycompat.getcwd()
3312 olddir = pycompat.getcwd()
3313 os.chdir(repo.root)
3313 os.chdir(repo.root)
3314
3314
3315 # make in-memory changes visible to external process
3315 # make in-memory changes visible to external process
3316 tr = repo.currenttransaction()
3316 tr = repo.currenttransaction()
3317 repo.dirstate.write(tr)
3317 repo.dirstate.write(tr)
3318 pending = tr and tr.writepending() and repo.root
3318 pending = tr and tr.writepending() and repo.root
3319
3319
3320 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
3320 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
3321 editform=editform, pending=pending,
3321 editform=editform, pending=pending,
3322 repopath=repo.path, action='commit')
3322 repopath=repo.path, action='commit')
3323 text = editortext
3323 text = editortext
3324
3324
3325 # strip away anything below this special string (used for editors that want
3325 # strip away anything below this special string (used for editors that want
3326 # to display the diff)
3326 # to display the diff)
3327 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3327 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
3328 if stripbelow:
3328 if stripbelow:
3329 text = text[:stripbelow.start()]
3329 text = text[:stripbelow.start()]
3330
3330
3331 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
3331 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
3332 os.chdir(olddir)
3332 os.chdir(olddir)
3333
3333
3334 if finishdesc:
3334 if finishdesc:
3335 text = finishdesc(text)
3335 text = finishdesc(text)
3336 if not text.strip():
3336 if not text.strip():
3337 raise error.Abort(_("empty commit message"))
3337 raise error.Abort(_("empty commit message"))
3338 if unchangedmessagedetection and editortext == templatetext:
3338 if unchangedmessagedetection and editortext == templatetext:
3339 raise error.Abort(_("commit message unchanged"))
3339 raise error.Abort(_("commit message unchanged"))
3340
3340
3341 return text
3341 return text
3342
3342
3343 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3343 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
3344 ui = repo.ui
3344 ui = repo.ui
3345 spec = formatter.templatespec(ref, None, None)
3345 spec = formatter.templatespec(ref, None, None)
3346 t = changeset_templater(ui, repo, spec, None, {}, False)
3346 t = changeset_templater(ui, repo, spec, None, {}, False)
3347 t.t.cache.update((k, templater.unquotestring(v))
3347 t.t.cache.update((k, templater.unquotestring(v))
3348 for k, v in repo.ui.configitems('committemplate'))
3348 for k, v in repo.ui.configitems('committemplate'))
3349
3349
3350 if not extramsg:
3350 if not extramsg:
3351 extramsg = '' # ensure that extramsg is string
3351 extramsg = '' # ensure that extramsg is string
3352
3352
3353 ui.pushbuffer()
3353 ui.pushbuffer()
3354 t.show(ctx, extramsg=extramsg)
3354 t.show(ctx, extramsg=extramsg)
3355 return ui.popbuffer()
3355 return ui.popbuffer()
3356
3356
3357 def hgprefix(msg):
3357 def hgprefix(msg):
3358 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
3358 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
3359
3359
3360 def buildcommittext(repo, ctx, subs, extramsg):
3360 def buildcommittext(repo, ctx, subs, extramsg):
3361 edittext = []
3361 edittext = []
3362 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3362 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
3363 if ctx.description():
3363 if ctx.description():
3364 edittext.append(ctx.description())
3364 edittext.append(ctx.description())
3365 edittext.append("")
3365 edittext.append("")
3366 edittext.append("") # Empty line between message and comments.
3366 edittext.append("") # Empty line between message and comments.
3367 edittext.append(hgprefix(_("Enter commit message."
3367 edittext.append(hgprefix(_("Enter commit message."
3368 " Lines beginning with 'HG:' are removed.")))
3368 " Lines beginning with 'HG:' are removed.")))
3369 edittext.append(hgprefix(extramsg))
3369 edittext.append(hgprefix(extramsg))
3370 edittext.append("HG: --")
3370 edittext.append("HG: --")
3371 edittext.append(hgprefix(_("user: %s") % ctx.user()))
3371 edittext.append(hgprefix(_("user: %s") % ctx.user()))
3372 if ctx.p2():
3372 if ctx.p2():
3373 edittext.append(hgprefix(_("branch merge")))
3373 edittext.append(hgprefix(_("branch merge")))
3374 if ctx.branch():
3374 if ctx.branch():
3375 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
3375 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
3376 if bookmarks.isactivewdirparent(repo):
3376 if bookmarks.isactivewdirparent(repo):
3377 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
3377 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
3378 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
3378 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
3379 edittext.extend([hgprefix(_("added %s") % f) for f in added])
3379 edittext.extend([hgprefix(_("added %s") % f) for f in added])
3380 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
3380 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
3381 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
3381 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
3382 if not added and not modified and not removed:
3382 if not added and not modified and not removed:
3383 edittext.append(hgprefix(_("no files changed")))
3383 edittext.append(hgprefix(_("no files changed")))
3384 edittext.append("")
3384 edittext.append("")
3385
3385
3386 return "\n".join(edittext)
3386 return "\n".join(edittext)
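# Illustrative example (names invented) of the text built above and handed
# to the editor:
#
#   <existing description, if any>
#
#
#   HG: Enter commit message. Lines beginning with 'HG:' are removed.
#   HG: Leave message empty to abort commit.
#   HG: --
#   HG: user: Jane Doe <jane@example.com>
#   HG: branch 'default'
#   HG: changed mercurial/cmdutil.py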
3387
3387
3388 def commitstatus(repo, node, branch, bheads=None, opts=None):
3388 def commitstatus(repo, node, branch, bheads=None, opts=None):
3389 if opts is None:
3389 if opts is None:
3390 opts = {}
3390 opts = {}
3391 ctx = repo[node]
3391 ctx = repo[node]
3392 parents = ctx.parents()
3392 parents = ctx.parents()
3393
3393
3394 if (not opts.get('amend') and bheads and node not in bheads and not
3394 if (not opts.get('amend') and bheads and node not in bheads and not
3395 [x for x in parents if x.node() in bheads and x.branch() == branch]):
3395 [x for x in parents if x.node() in bheads and x.branch() == branch]):
3396 repo.ui.status(_('created new head\n'))
3396 repo.ui.status(_('created new head\n'))
3397 # The message is not printed for initial roots. For the other
3397 # The message is not printed for initial roots. For the other
3398 # changesets, it is printed in the following situations:
3398 # changesets, it is printed in the following situations:
3399 #
3399 #
3400 # Par column: for the 2 parents with ...
3400 # Par column: for the 2 parents with ...
3401 # N: null or no parent
3401 # N: null or no parent
3402 # B: parent is on another named branch
3402 # B: parent is on another named branch
3403 # C: parent is a regular non head changeset
3403 # C: parent is a regular non head changeset
3404 # H: parent was a branch head of the current branch
3404 # H: parent was a branch head of the current branch
3405 # Msg column: whether we print "created new head" message
3405 # Msg column: whether we print "created new head" message
3406 # In the following, it is assumed that there already exist some
3407 # initial branch heads of the current branch, otherwise nothing is
3408 # printed anyway.
3409 #
3409 #
3410 # Par Msg Comment
3410 # Par Msg Comment
3411 # N N y additional topo root
3411 # N N y additional topo root
3412 #
3412 #
3413 # B N y additional branch root
3413 # B N y additional branch root
3414 # C N y additional topo head
3414 # C N y additional topo head
3415 # H N n usual case
3415 # H N n usual case
3416 #
3416 #
3417 # B B y weird additional branch root
3417 # B B y weird additional branch root
3418 # C B y branch merge
3418 # C B y branch merge
3419 # H B n merge with named branch
3419 # H B n merge with named branch
3420 #
3420 #
3421 # C C y additional head from merge
3421 # C C y additional head from merge
3422 # C H n merge with a head
3422 # C H n merge with a head
3423 #
3423 #
3424 # H H n head merge: head count decreases
3424 # H H n head merge: head count decreases
3425
3425
3426 if not opts.get('close_branch'):
3426 if not opts.get('close_branch'):
3427 for r in parents:
3427 for r in parents:
3428 if r.closesbranch() and r.branch() == branch:
3428 if r.closesbranch() and r.branch() == branch:
3429 repo.ui.status(_('reopening closed branch head %d\n') % r)
3429 repo.ui.status(_('reopening closed branch head %d\n') % r)
3430
3430
3431 if repo.ui.debugflag:
3431 if repo.ui.debugflag:
3432 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
3432 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
3433 elif repo.ui.verbose:
3433 elif repo.ui.verbose:
3434 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3434 repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
3435
3435
3436 def postcommitstatus(repo, pats, opts):
3436 def postcommitstatus(repo, pats, opts):
3437 return repo.status(match=scmutil.match(repo[None], pats, opts))
3437 return repo.status(match=scmutil.match(repo[None], pats, opts))
3438
3438
3439 def revert(ui, repo, ctx, parents, *pats, **opts):
3439 def revert(ui, repo, ctx, parents, *pats, **opts):
3440 opts = pycompat.byteskwargs(opts)
3440 opts = pycompat.byteskwargs(opts)
3441 parent, p2 = parents
3441 parent, p2 = parents
3442 node = ctx.node()
3442 node = ctx.node()
3443
3443
3444 mf = ctx.manifest()
3444 mf = ctx.manifest()
3445 if node == p2:
3445 if node == p2:
3446 parent = p2
3446 parent = p2
3447
3447
3448 # We need all matching names in the dirstate and in the manifest of the
3449 # target rev, so we have to walk both. Do not print errors if files exist
3450 # in one but not the other. In both cases, filesets should be evaluated
3451 # against the workingctx to get a consistent result (issue4497). This means
3452 # 'set:**' cannot be used to select missing files from the target rev.
3453
3453
3454 # `names` is a mapping for all elements in working copy and target revision
3454 # `names` is a mapping for all elements in working copy and target revision
3455 # The mapping is in the form:
3455 # The mapping is in the form:
3456 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
3457 names = {}
3457 names = {}
3458
3458
3459 with repo.wlock():
3459 with repo.wlock():
3460 ## filling of the `names` mapping
3460 ## filling of the `names` mapping
3461 # walk dirstate to fill `names`
3461 # walk dirstate to fill `names`
3462
3462
3463 interactive = opts.get('interactive', False)
3463 interactive = opts.get('interactive', False)
3464 wctx = repo[None]
3464 wctx = repo[None]
3465 m = scmutil.match(wctx, pats, opts)
3465 m = scmutil.match(wctx, pats, opts)
3466
3466
3467 # we'll need this later
3467 # we'll need this later
3468 targetsubs = sorted(s for s in wctx.substate if m(s))
3468 targetsubs = sorted(s for s in wctx.substate if m(s))
3469
3469
3470 if not m.always():
3470 if not m.always():
3471 matcher = matchmod.badmatch(m, lambda x, y: False)
3471 matcher = matchmod.badmatch(m, lambda x, y: False)
3472 for abs in wctx.walk(matcher):
3472 for abs in wctx.walk(matcher):
3473 names[abs] = m.rel(abs), m.exact(abs)
3473 names[abs] = m.rel(abs), m.exact(abs)
3474
3474
3475 # walk target manifest to fill `names`
3475 # walk target manifest to fill `names`
3476
3476
3477 def badfn(path, msg):
3477 def badfn(path, msg):
3478 if path in names:
3478 if path in names:
3479 return
3479 return
3480 if path in ctx.substate:
3480 if path in ctx.substate:
3481 return
3481 return
3482 path_ = path + '/'
3482 path_ = path + '/'
3483 for f in names:
3483 for f in names:
3484 if f.startswith(path_):
3484 if f.startswith(path_):
3485 return
3485 return
3486 ui.warn("%s: %s\n" % (m.rel(path), msg))
3486 ui.warn("%s: %s\n" % (m.rel(path), msg))
3487
3487
3488 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3488 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
3489 if abs not in names:
3489 if abs not in names:
3490 names[abs] = m.rel(abs), m.exact(abs)
3490 names[abs] = m.rel(abs), m.exact(abs)
3491
3491
3492 # Find the status of all files in `names`.
3492 # Find the status of all files in `names`.
3493 m = scmutil.matchfiles(repo, names)
3493 m = scmutil.matchfiles(repo, names)
3494
3494
3495 changes = repo.status(node1=node, match=m,
3495 changes = repo.status(node1=node, match=m,
3496 unknown=True, ignored=True, clean=True)
3496 unknown=True, ignored=True, clean=True)
3497 else:
3497 else:
3498 changes = repo.status(node1=node, match=m)
3498 changes = repo.status(node1=node, match=m)
3499 for kind in changes:
3499 for kind in changes:
3500 for abs in kind:
3500 for abs in kind:
3501 names[abs] = m.rel(abs), m.exact(abs)
3501 names[abs] = m.rel(abs), m.exact(abs)
3502
3502
3503 m = scmutil.matchfiles(repo, names)
3503 m = scmutil.matchfiles(repo, names)
3504
3504
3505 modified = set(changes.modified)
3505 modified = set(changes.modified)
3506 added = set(changes.added)
3506 added = set(changes.added)
3507 removed = set(changes.removed)
3507 removed = set(changes.removed)
3508 _deleted = set(changes.deleted)
3508 _deleted = set(changes.deleted)
3509 unknown = set(changes.unknown)
3509 unknown = set(changes.unknown)
3510 unknown.update(changes.ignored)
3510 unknown.update(changes.ignored)
3511 clean = set(changes.clean)
3511 clean = set(changes.clean)
3512 modadded = set()
3512 modadded = set()
3513
3513
3514 # We need to account for the state of the file in the dirstate,
3514 # We need to account for the state of the file in the dirstate,
3515 # even when we revert against something other than the parent. This will
3515 # even when we revert against something other than the parent. This will
3516 # slightly alter the behavior of revert (doing a backup or not, delete
3516 # slightly alter the behavior of revert (doing a backup or not, delete
3517 # or just forget, etc.).
3517 # or just forget, etc.).
3518 if parent == node:
3518 if parent == node:
3519 dsmodified = modified
3519 dsmodified = modified
3520 dsadded = added
3520 dsadded = added
3521 dsremoved = removed
3521 dsremoved = removed
3522 # store all local modifications, useful later for rename detection
3522 # store all local modifications, useful later for rename detection
3523 localchanges = dsmodified | dsadded
3523 localchanges = dsmodified | dsadded
3524 modified, added, removed = set(), set(), set()
3524 modified, added, removed = set(), set(), set()
3525 else:
3525 else:
3526 changes = repo.status(node1=parent, match=m)
3526 changes = repo.status(node1=parent, match=m)
3527 dsmodified = set(changes.modified)
3527 dsmodified = set(changes.modified)
3528 dsadded = set(changes.added)
3528 dsadded = set(changes.added)
3529 dsremoved = set(changes.removed)
3529 dsremoved = set(changes.removed)
3530 # store all local modifications, useful later for rename detection
3530 # store all local modifications, useful later for rename detection
3531 localchanges = dsmodified | dsadded
3531 localchanges = dsmodified | dsadded
3532
3532
3533 # only take into account removes between wc and target
3533 # only take into account removes between wc and target
3534 clean |= dsremoved - removed
3534 clean |= dsremoved - removed
3535 dsremoved &= removed
3535 dsremoved &= removed
3536 # distinguish between dirstate removes and the others
3536 # distinguish between dirstate removes and the others
3537 removed -= dsremoved
3537 removed -= dsremoved
3538
3538
3539 modadded = added & dsmodified
3539 modadded = added & dsmodified
3540 added -= modadded
3540 added -= modadded
3541
3541
3542 # tell newly modified apart.
3542 # tell newly modified apart.
3543 dsmodified &= modified
3543 dsmodified &= modified
3544 dsmodified |= modified & dsadded # dirstate added may need backup
3544 dsmodified |= modified & dsadded # dirstate added may need backup
3545 modified -= dsmodified
3545 modified -= dsmodified
3546
3546
3547 # We need to wait for some post-processing to update this set
3547 # We need to wait for some post-processing to update this set
3548 # before making the distinction. The dirstate will be used for
3548 # before making the distinction. The dirstate will be used for
3549 # that purpose.
3549 # that purpose.
3550 dsadded = added
3550 dsadded = added
3551
3551
3552 # in case of merge, files that are actually added can be reported as
3552 # in case of merge, files that are actually added can be reported as
3553 # modified, so we need to post-process the result
3553 # modified, so we need to post-process the result
3554 if p2 != nullid:
3554 if p2 != nullid:
3555 mergeadd = set(dsmodified)
3555 mergeadd = set(dsmodified)
3556 for path in dsmodified:
3556 for path in dsmodified:
3557 if path in mf:
3557 if path in mf:
3558 mergeadd.remove(path)
3558 mergeadd.remove(path)
3559 dsadded |= mergeadd
3559 dsadded |= mergeadd
3560 dsmodified -= mergeadd
3560 dsmodified -= mergeadd
3561
3561
3562 # if f is a rename, update `names` to also revert the source
3562 # if f is a rename, update `names` to also revert the source
3563 cwd = repo.getcwd()
3563 cwd = repo.getcwd()
3564 for f in localchanges:
3564 for f in localchanges:
3565 src = repo.dirstate.copied(f)
3565 src = repo.dirstate.copied(f)
3566 # XXX should we check for rename down to target node?
3566 # XXX should we check for rename down to target node?
3567 if src and src not in names and repo.dirstate[src] == 'r':
3567 if src and src not in names and repo.dirstate[src] == 'r':
3568 dsremoved.add(src)
3568 dsremoved.add(src)
3569 names[src] = (repo.pathto(src, cwd), True)
3569 names[src] = (repo.pathto(src, cwd), True)
3570
3570
3571 # determine the exact nature of the deleted files
3571 # determine the exact nature of the deleted files
3572 deladded = set(_deleted)
3572 deladded = set(_deleted)
3573 for path in _deleted:
3573 for path in _deleted:
3574 if path in mf:
3574 if path in mf:
3575 deladded.remove(path)
3575 deladded.remove(path)
3576 deleted = _deleted - deladded
3576 deleted = _deleted - deladded
3577
3577
3578 # distinguish between files to forget and the others
3578 # distinguish between files to forget and the others
3579 added = set()
3579 added = set()
3580 for abs in dsadded:
3580 for abs in dsadded:
3581 if repo.dirstate[abs] != 'a':
3581 if repo.dirstate[abs] != 'a':
3582 added.add(abs)
3582 added.add(abs)
3583 dsadded -= added
3583 dsadded -= added
3584
3584
3585 for abs in deladded:
3585 for abs in deladded:
3586 if repo.dirstate[abs] == 'a':
3586 if repo.dirstate[abs] == 'a':
3587 dsadded.add(abs)
3587 dsadded.add(abs)
3588 deladded -= dsadded
3588 deladded -= dsadded
3589
3589
3590 # For files marked as removed, we check if an unknown file is present at
3590 # For files marked as removed, we check if an unknown file is present at
3591 # the same path. If such a file exists it may need to be backed up.
3591 # the same path. If such a file exists it may need to be backed up.
3592 # Making the distinction at this stage helps keep the backup
3592 # Making the distinction at this stage helps keep the backup
3593 # logic simpler.
3593 # logic simpler.
3594 removunk = set()
3594 removunk = set()
3595 for abs in removed:
3595 for abs in removed:
3596 target = repo.wjoin(abs)
3596 target = repo.wjoin(abs)
3597 if os.path.lexists(target):
3597 if os.path.lexists(target):
3598 removunk.add(abs)
3598 removunk.add(abs)
3599 removed -= removunk
3599 removed -= removunk
3600
3600
3601 dsremovunk = set()
3601 dsremovunk = set()
3602 for abs in dsremoved:
3602 for abs in dsremoved:
3603 target = repo.wjoin(abs)
3603 target = repo.wjoin(abs)
3604 if os.path.lexists(target):
3604 if os.path.lexists(target):
3605 dsremovunk.add(abs)
3605 dsremovunk.add(abs)
3606 dsremoved -= dsremovunk
3606 dsremoved -= dsremovunk
3607
3607
3608 # action to be actually performed by revert
3608 # action to be actually performed by revert
3609 # (<list of files>, <message>) tuple
3609 # (<list of files>, <message>) tuple
3610 actions = {'revert': ([], _('reverting %s\n')),
3610 actions = {'revert': ([], _('reverting %s\n')),
3611 'add': ([], _('adding %s\n')),
3611 'add': ([], _('adding %s\n')),
3612 'remove': ([], _('removing %s\n')),
3612 'remove': ([], _('removing %s\n')),
3613 'drop': ([], _('removing %s\n')),
3613 'drop': ([], _('removing %s\n')),
3614 'forget': ([], _('forgetting %s\n')),
3614 'forget': ([], _('forgetting %s\n')),
3615 'undelete': ([], _('undeleting %s\n')),
3615 'undelete': ([], _('undeleting %s\n')),
3616 'noop': (None, _('no changes needed to %s\n')),
3616 'noop': (None, _('no changes needed to %s\n')),
3617 'unknown': (None, _('file not managed: %s\n')),
3617 'unknown': (None, _('file not managed: %s\n')),
3618 }
3618 }
3619
3619
3620 # "constant" that convey the backup strategy.
3620 # "constant" that convey the backup strategy.
3621 # All set to `discard` if `no-backup` is set do avoid checking
3621 # All set to `discard` if `no-backup` is set do avoid checking
3622 # no_backup lower in the code.
3622 # no_backup lower in the code.
3623 # These values are ordered for comparison purposes
3623 # These values are ordered for comparison purposes
3624 backupinteractive = 3 # do backup if interactively modified
3624 backupinteractive = 3 # do backup if interactively modified
3625 backup = 2 # unconditionally do backup
3625 backup = 2 # unconditionally do backup
3626 check = 1 # check if the existing file differs from target
3626 check = 1 # check if the existing file differs from target
3627 discard = 0 # never do backup
3627 discard = 0 # never do backup
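# For instance, a file whose strategy is `check` (1) is backed up only when
# its content actually differs from the target (the `backup <= dobackup`
# test in the dispatch loop below), `backup` (2) is backed up
# unconditionally, and `backupinteractive` (3) defers the copy until the
# interactive hunk selection.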
3628 if opts.get('no_backup'):
3628 if opts.get('no_backup'):
3629 backupinteractive = backup = check = discard
3629 backupinteractive = backup = check = discard
3630 if interactive:
3630 if interactive:
3631 dsmodifiedbackup = backupinteractive
3631 dsmodifiedbackup = backupinteractive
3632 else:
3632 else:
3633 dsmodifiedbackup = backup
3633 dsmodifiedbackup = backup
3634 tobackup = set()
3634 tobackup = set()
3635
3635
3636 backupanddel = actions['remove']
3636 backupanddel = actions['remove']
3637 if not opts.get('no_backup'):
3637 if not opts.get('no_backup'):
3638 backupanddel = actions['drop']
3638 backupanddel = actions['drop']
3639
3639
3640 disptable = (
3640 disptable = (
3641 # dispatch table:
3641 # dispatch table:
3642 # file state
3642 # file state
3643 # action
3643 # action
3644 # make backup
3644 # make backup
3645
3645
3646 ## Sets whose contents will result in changes to files on disk
3646 ## Sets whose contents will result in changes to files on disk
3647 # Modified compared to target, no local change
3647 # Modified compared to target, no local change
3648 (modified, actions['revert'], discard),
3648 (modified, actions['revert'], discard),
3649 # Modified compared to target, but local file is deleted
3649 # Modified compared to target, but local file is deleted
3650 (deleted, actions['revert'], discard),
3650 (deleted, actions['revert'], discard),
3651 # Modified compared to target, local change
3651 # Modified compared to target, local change
3652 (dsmodified, actions['revert'], dsmodifiedbackup),
3652 (dsmodified, actions['revert'], dsmodifiedbackup),
3653 # Added since target
3653 # Added since target
3654 (added, actions['remove'], discard),
3654 (added, actions['remove'], discard),
3655 # Added in working directory
3655 # Added in working directory
3656 (dsadded, actions['forget'], discard),
3656 (dsadded, actions['forget'], discard),
3657 # Added since target, have local modification
3657 # Added since target, have local modification
3658 (modadded, backupanddel, backup),
3658 (modadded, backupanddel, backup),
3659 # Added since target but file is missing in working directory
3659 # Added since target but file is missing in working directory
3660 (deladded, actions['drop'], discard),
3660 (deladded, actions['drop'], discard),
3661 # Removed since target, before working copy parent
3661 # Removed since target, before working copy parent
3662 (removed, actions['add'], discard),
3662 (removed, actions['add'], discard),
3663 # Same as `removed` but an unknown file exists at the same path
3663 # Same as `removed` but an unknown file exists at the same path
3664 (removunk, actions['add'], check),
3664 (removunk, actions['add'], check),
3665 # Removed since target, marked as such in working copy parent
3665 # Removed since target, marked as such in working copy parent
3666 (dsremoved, actions['undelete'], discard),
3666 (dsremoved, actions['undelete'], discard),
3667 # Same as `dsremoved` but an unknown file exists at the same path
3667 # Same as `dsremoved` but an unknown file exists at the same path
3668 (dsremovunk, actions['undelete'], check),
3668 (dsremovunk, actions['undelete'], check),
3669 ## the following sets do not result in any file changes
3669 ## the following sets do not result in any file changes
3670 # File with no modification
3670 # File with no modification
3671 (clean, actions['noop'], discard),
3671 (clean, actions['noop'], discard),
3672 # Existing file, not tracked anywhere
3672 # Existing file, not tracked anywhere
3673 (unknown, actions['unknown'], discard),
3673 (unknown, actions['unknown'], discard),
3674 )
3674 )
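# Each entry reads as "files in this set get this action, with this backup
# strategy". For example, (dsmodified, actions['revert'], dsmodifiedbackup)
# means files modified in the working directory are reverted and, unless
# --no-backup was given, saved aside first (as .orig files by default).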
3675
3675
3676 for abs, (rel, exact) in sorted(names.items()):
3676 for abs, (rel, exact) in sorted(names.items()):
3677 # target file to be touched on disk (relative to cwd)
3677 # target file to be touched on disk (relative to cwd)
3678 target = repo.wjoin(abs)
3678 target = repo.wjoin(abs)
3679 # search the entry in the dispatch table.
3679 # search the entry in the dispatch table.
3680 # if the file is in any of these sets, it was touched in the working
3680 # if the file is in any of these sets, it was touched in the working
3681 # directory parent and we are sure it needs to be reverted.
3681 # directory parent and we are sure it needs to be reverted.
3682 for table, (xlist, msg), dobackup in disptable:
3682 for table, (xlist, msg), dobackup in disptable:
3683 if abs not in table:
3683 if abs not in table:
3684 continue
3684 continue
3685 if xlist is not None:
3685 if xlist is not None:
3686 xlist.append(abs)
3686 xlist.append(abs)
3687 if dobackup:
3687 if dobackup:
3688 # If in interactive mode, don't automatically create
3688 # If in interactive mode, don't automatically create
3689 # .orig files (issue4793)
3689 # .orig files (issue4793)
3690 if dobackup == backupinteractive:
3690 if dobackup == backupinteractive:
3691 tobackup.add(abs)
3691 tobackup.add(abs)
3692 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3692 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3693 bakname = scmutil.origpath(ui, repo, rel)
3693 bakname = scmutil.origpath(ui, repo, rel)
3694 ui.note(_('saving current version of %s as %s\n') %
3694 ui.note(_('saving current version of %s as %s\n') %
3695 (rel, bakname))
3695 (rel, bakname))
3696 if not opts.get('dry_run'):
3696 if not opts.get('dry_run'):
3697 if interactive:
3697 if interactive:
3698 util.copyfile(target, bakname)
3698 util.copyfile(target, bakname)
3699 else:
3699 else:
3700 util.rename(target, bakname)
3700 util.rename(target, bakname)
3701 if ui.verbose or not exact:
3701 if ui.verbose or not exact:
3702 if not isinstance(msg, bytes):
3702 if not isinstance(msg, bytes):
3703 msg = msg(abs)
3703 msg = msg(abs)
3704 ui.status(msg % rel)
3704 ui.status(msg % rel)
3705 elif exact:
3705 elif exact:
3706 ui.warn(msg % rel)
3706 ui.warn(msg % rel)
3707 break
3707 break
3708
3708
3709 if not opts.get('dry_run'):
3709 if not opts.get('dry_run'):
3710 needdata = ('revert', 'add', 'undelete')
3710 needdata = ('revert', 'add', 'undelete')
3711 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3711 _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
3712 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3712 _performrevert(repo, parents, ctx, actions, interactive, tobackup)
3713
3713
3714 if targetsubs:
3714 if targetsubs:
3715 # Revert the subrepos on the revert list
3715 # Revert the subrepos on the revert list
3716 for sub in targetsubs:
3716 for sub in targetsubs:
3717 try:
3717 try:
3718 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3718 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3719 **pycompat.strkwargs(opts))
3719 **pycompat.strkwargs(opts))
3720 except KeyError:
3720 except KeyError:
3721 raise error.Abort("subrepository '%s' does not exist in %s!"
3721 raise error.Abort("subrepository '%s' does not exist in %s!"
3722 % (sub, short(ctx.node())))
3722 % (sub, short(ctx.node())))
3723
3723
3724 def _revertprefetch(repo, ctx, *files):
3724 def _revertprefetch(repo, ctx, *files):
3725 """Let extension changing the storage layer prefetch content"""
3725 """Let extension changing the storage layer prefetch content"""
3726
3726
3727 def _performrevert(repo, parents, ctx, actions, interactive=False,
3727 def _performrevert(repo, parents, ctx, actions, interactive=False,
3728 tobackup=None):
3728 tobackup=None):
3729 """function that actually perform all the actions computed for revert
3729 """function that actually perform all the actions computed for revert
3730
3730
3731 This is an independent function to let extensions plug in and react to
3731 This is an independent function to let extensions plug in and react to
3732 the imminent revert.
3732 the imminent revert.
3733
3733
3734 Make sure you have the working directory locked when calling this function.
3734 Make sure you have the working directory locked when calling this function.
3735 """
3735 """
3736 parent, p2 = parents
3736 parent, p2 = parents
3737 node = ctx.node()
3737 node = ctx.node()
3738 excluded_files = []
3738 excluded_files = []
3739 matcher_opts = {"exclude": excluded_files}
3739 matcher_opts = {"exclude": excluded_files}
3740
3740
3741 def checkout(f):
3741 def checkout(f):
3742 fc = ctx[f]
3742 fc = ctx[f]
3743 repo.wwrite(f, fc.data(), fc.flags())
3743 repo.wwrite(f, fc.data(), fc.flags())
3744
3744
3745 def doremove(f):
3745 def doremove(f):
3746 try:
3746 try:
3747 repo.wvfs.unlinkpath(f)
3747 repo.wvfs.unlinkpath(f)
3748 except OSError:
3748 except OSError:
3749 pass
3749 pass
3750 repo.dirstate.remove(f)
3750 repo.dirstate.remove(f)
3751
3751
3752 audit_path = pathutil.pathauditor(repo.root, cached=True)
3752 audit_path = pathutil.pathauditor(repo.root, cached=True)
3753 for f in actions['forget'][0]:
3753 for f in actions['forget'][0]:
3754 if interactive:
3754 if interactive:
3755 choice = repo.ui.promptchoice(
3755 choice = repo.ui.promptchoice(
3756 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3756 _("forget added file %s (Yn)?$$ &Yes $$ &No") % f)
3757 if choice == 0:
3757 if choice == 0:
3758 repo.dirstate.drop(f)
3758 repo.dirstate.drop(f)
3759 else:
3759 else:
3760 excluded_files.append(repo.wjoin(f))
3760 excluded_files.append(repo.wjoin(f))
3761 else:
3761 else:
3762 repo.dirstate.drop(f)
3762 repo.dirstate.drop(f)
3763 for f in actions['remove'][0]:
3763 for f in actions['remove'][0]:
3764 audit_path(f)
3764 audit_path(f)
3765 if interactive:
3765 if interactive:
3766 choice = repo.ui.promptchoice(
3766 choice = repo.ui.promptchoice(
3767 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3767 _("remove added file %s (Yn)?$$ &Yes $$ &No") % f)
3768 if choice == 0:
3768 if choice == 0:
3769 doremove(f)
3769 doremove(f)
3770 else:
3770 else:
3771 excluded_files.append(repo.wjoin(f))
3771 excluded_files.append(repo.wjoin(f))
3772 else:
3772 else:
3773 doremove(f)
3773 doremove(f)
3774 for f in actions['drop'][0]:
3774 for f in actions['drop'][0]:
3775 audit_path(f)
3775 audit_path(f)
3776 repo.dirstate.remove(f)
3776 repo.dirstate.remove(f)
3777
3777
3778 normal = None
3778 normal = None
3779 if node == parent:
3779 if node == parent:
3780 # We're reverting to our parent. If possible, we'd like status
3780 # We're reverting to our parent. If possible, we'd like status
3781 # to report the file as clean. We have to use normallookup for
3781 # to report the file as clean. We have to use normallookup for
3782 # merges to avoid losing information about merged/dirty files.
3782 # merges to avoid losing information about merged/dirty files.
3783 if p2 != nullid:
3783 if p2 != nullid:
3784 normal = repo.dirstate.normallookup
3784 normal = repo.dirstate.normallookup
3785 else:
3785 else:
3786 normal = repo.dirstate.normal
3786 normal = repo.dirstate.normal
3787
3787
3788 newlyaddedandmodifiedfiles = set()
3788 newlyaddedandmodifiedfiles = set()
3789 if interactive:
3789 if interactive:
3790 # Prompt the user for changes to revert
3790 # Prompt the user for changes to revert
3791 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3791 torevert = [repo.wjoin(f) for f in actions['revert'][0]]
3792 m = scmutil.match(ctx, torevert, matcher_opts)
3792 m = scmutil.match(ctx, torevert, matcher_opts)
3793 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3793 diffopts = patch.difffeatureopts(repo.ui, whitespace=True)
3794 diffopts.nodates = True
3794 diffopts.nodates = True
3795 diffopts.git = True
3795 diffopts.git = True
3796 operation = 'discard'
3796 operation = 'discard'
3797 reversehunks = True
3797 reversehunks = True
3798 if node != parent:
3798 if node != parent:
3799 operation = 'apply'
3799 operation = 'apply'
3800 reversehunks = False
3800 reversehunks = False
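# When reverting to the working directory parent, the diff is taken from
# the target to the working copy and the selected hunks are reversed, so
# the user is effectively picking changes to discard. When reverting to
# another revision, the diff already points from the working copy to the
# target and the selected hunks are applied as-is.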
3801 if reversehunks:
3801 if reversehunks:
3802 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3802 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3803 else:
3803 else:
3804 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3804 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3805 originalchunks = patch.parsepatch(diff)
3805 originalchunks = patch.parsepatch(diff)
3806
3806
3807 try:
3807 try:
3808
3808
3809 chunks, opts = recordfilter(repo.ui, originalchunks,
3809 chunks, opts = recordfilter(repo.ui, originalchunks,
3810 operation=operation)
3810 operation=operation)
3811 if reversehunks:
3811 if reversehunks:
3812 chunks = patch.reversehunks(chunks)
3812 chunks = patch.reversehunks(chunks)
3813
3813
3814 except error.PatchError as err:
3814 except error.PatchError as err:
3815 raise error.Abort(_('error parsing patch: %s') % err)
3815 raise error.Abort(_('error parsing patch: %s') % err)
3816
3816
3817 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3817 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3818 if tobackup is None:
3818 if tobackup is None:
3819 tobackup = set()
3819 tobackup = set()
3820 # Apply changes
3820 # Apply changes
3821 fp = stringio()
3821 fp = stringio()
3822 for c in chunks:
3822 for c in chunks:
3823 # Create a backup file only if this hunk should be backed up
3823 # Create a backup file only if this hunk should be backed up
3824 if ishunk(c) and c.header.filename() in tobackup:
3824 if ishunk(c) and c.header.filename() in tobackup:
3825 abs = c.header.filename()
3825 abs = c.header.filename()
3826 target = repo.wjoin(abs)
3826 target = repo.wjoin(abs)
3827 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3827 bakname = scmutil.origpath(repo.ui, repo, m.rel(abs))
3828 util.copyfile(target, bakname)
3828 util.copyfile(target, bakname)
3829 tobackup.remove(abs)
3829 tobackup.remove(abs)
3830 c.write(fp)
3830 c.write(fp)
3831 dopatch = fp.tell()
3831 dopatch = fp.tell()
3832 fp.seek(0)
3832 fp.seek(0)
3833 if dopatch:
3833 if dopatch:
3834 try:
3834 try:
3835 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3835 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3836 except error.PatchError as err:
3836 except error.PatchError as err:
3837 raise error.Abort(str(err))
3837 raise error.Abort(str(err))
3838 del fp
3838 del fp
3839 else:
3839 else:
3840 for f in actions['revert'][0]:
3840 for f in actions['revert'][0]:
3841 checkout(f)
3841 checkout(f)
3842 if normal:
3842 if normal:
3843 normal(f)
3843 normal(f)
3844
3844
3845 for f in actions['add'][0]:
3845 for f in actions['add'][0]:
3846 # Don't checkout modified files, they are already created by the diff
3846 # Don't checkout modified files, they are already created by the diff
3847 if f not in newlyaddedandmodifiedfiles:
3847 if f not in newlyaddedandmodifiedfiles:
3848 checkout(f)
3848 checkout(f)
3849 repo.dirstate.add(f)
3849 repo.dirstate.add(f)
3850
3850
3851 normal = repo.dirstate.normallookup
3851 normal = repo.dirstate.normallookup
3852 if node == parent and p2 == nullid:
3852 if node == parent and p2 == nullid:
3853 normal = repo.dirstate.normal
3853 normal = repo.dirstate.normal
3854 for f in actions['undelete'][0]:
3854 for f in actions['undelete'][0]:
3855 checkout(f)
3855 checkout(f)
3856 normal(f)
3856 normal(f)
3857
3857
3858 copied = copies.pathcopies(repo[parent], ctx)
3858 copied = copies.pathcopies(repo[parent], ctx)
3859
3859
3860 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3860 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3861 if f in copied:
3861 if f in copied:
3862 repo.dirstate.copy(copied[f], f)
3862 repo.dirstate.copy(copied[f], f)
3863
3863
3864 class command(registrar.command):
3864 class command(registrar.command):
3865 """deprecated: used registrar.command instead"""
3865 """deprecated: used registrar.command instead"""
3866 def _doregister(self, func, name, *args, **kwargs):
3866 def _doregister(self, func, name, *args, **kwargs):
3867 func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
3867 func._deprecatedregistrar = True # flag for deprecwarn in extensions.py
3868 return super(command, self)._doregister(func, name, *args, **kwargs)
3868 return super(command, self)._doregister(func, name, *args, **kwargs)
3869
3869
3870 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3870 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3871 # commands.outgoing. "missing" is "missing" of the result of
3871 # commands.outgoing. "missing" is "missing" of the result of
3872 # "findcommonoutgoing()"
3872 # "findcommonoutgoing()"
3873 outgoinghooks = util.hooks()
3873 outgoinghooks = util.hooks()
3874
3874
3875 # a list of (ui, repo) functions called by commands.summary
3875 # a list of (ui, repo) functions called by commands.summary
3876 summaryhooks = util.hooks()
3876 summaryhooks = util.hooks()
3877
3877
3878 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3878 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3879 #
3879 #
3880 # functions should return tuple of booleans below, if 'changes' is None:
3880 # functions should return tuple of booleans below, if 'changes' is None:
3881 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3881 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3882 #
3882 #
3883 # otherwise, 'changes' is a tuple of tuples below:
3883 # otherwise, 'changes' is a tuple of tuples below:
3884 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3884 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3885 # - (desturl, destbranch, destpeer, outgoing)
3885 # - (desturl, destbranch, destpeer, outgoing)
3886 summaryremotehooks = util.hooks()
3886 summaryremotehooks = util.hooks()
3887
3887
3888 # A list of state files kept by multistep operations like graft.
3888 # A list of state files kept by multistep operations like graft.
3889 # Since graft cannot be aborted, it is considered 'clearable' by update.
3889 # Since graft cannot be aborted, it is considered 'clearable' by update.
3890 # note: bisect is intentionally excluded
3890 # note: bisect is intentionally excluded
3891 # (state file, clearable, allowcommit, error, hint)
3891 # (state file, clearable, allowcommit, error, hint)
3892 unfinishedstates = [
3892 unfinishedstates = [
3893 ('graftstate', True, False, _('graft in progress'),
3893 ('graftstate', True, False, _('graft in progress'),
3894 _("use 'hg graft --continue' or 'hg update' to abort")),
3894 _("use 'hg graft --continue' or 'hg update' to abort")),
3895 ('updatestate', True, False, _('last update was interrupted'),
3895 ('updatestate', True, False, _('last update was interrupted'),
3896 _("use 'hg update' to get a consistent checkout"))
3896 _("use 'hg update' to get a consistent checkout"))
3897 ]
3897 ]
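# Extensions can append their own entries to this list; a hypothetical
# entry for an abortable operation could look like:
#
#   unfinishedstates.append(
#       ('examplestate', False, False, _('example operation in progress'),
#        _("use 'hg example --continue' or 'hg example --abort'")))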
3898
3898
3899 def checkunfinished(repo, commit=False):
3899 def checkunfinished(repo, commit=False):
3900 '''Look for an unfinished multistep operation, like graft, and abort
3900 '''Look for an unfinished multistep operation, like graft, and abort
3901 if found. It's probably good to check this right before
3901 if found. It's probably good to check this right before
3902 bailifchanged().
3902 bailifchanged().
3903 '''
3903 '''
3904 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3904 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3905 if commit and allowcommit:
3905 if commit and allowcommit:
3906 continue
3906 continue
3907 if repo.vfs.exists(f):
3907 if repo.vfs.exists(f):
3908 raise error.Abort(msg, hint=hint)
3908 raise error.Abort(msg, hint=hint)
3909
3909
3910 def clearunfinished(repo):
3910 def clearunfinished(repo):
3911 '''Check for unfinished operations (as above), and clear the ones
3911 '''Check for unfinished operations (as above), and clear the ones
3912 that are clearable.
3912 that are clearable.
3913 '''
3913 '''
3914 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3914 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3915 if not clearable and repo.vfs.exists(f):
3915 if not clearable and repo.vfs.exists(f):
3916 raise error.Abort(msg, hint=hint)
3916 raise error.Abort(msg, hint=hint)
3917 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3917 for f, clearable, allowcommit, msg, hint in unfinishedstates:
3918 if clearable and repo.vfs.exists(f):
3918 if clearable and repo.vfs.exists(f):
3919 util.unlink(repo.vfs.join(f))
3919 util.unlink(repo.vfs.join(f))
3920
3920
3921 afterresolvedstates = [
3921 afterresolvedstates = [
3922 ('graftstate',
3922 ('graftstate',
3923 _('hg graft --continue')),
3923 _('hg graft --continue')),
3924 ]
3924 ]
3925
3925
3926 def howtocontinue(repo):
3926 def howtocontinue(repo):
3927 '''Check for an unfinished operation and return the command to finish
3927 '''Check for an unfinished operation and return the command to finish
3928 it.
3928 it.
3929
3929
3930 afterresolvedstates tuples define a .hg/{file} and the corresponding
3930 afterresolvedstates tuples define a .hg/{file} and the corresponding
3931 command needed to finish it.
3931 command needed to finish it.
3932
3932
3933 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3933 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3934 a boolean.
3934 a boolean.
3935 '''
3935 '''
3936 contmsg = _("continue: %s")
3936 contmsg = _("continue: %s")
3937 for f, msg in afterresolvedstates:
3937 for f, msg in afterresolvedstates:
3938 if repo.vfs.exists(f):
3938 if repo.vfs.exists(f):
3939 return contmsg % msg, True
3939 return contmsg % msg, True
3940 if repo[None].dirty(missing=True, merge=False, branch=False):
3940 if repo[None].dirty(missing=True, merge=False, branch=False):
3941 return contmsg % _("hg commit"), False
3941 return contmsg % _("hg commit"), False
3942 return None, None
3942 return None, None
3943
3943
3944 def checkafterresolved(repo):
3944 def checkafterresolved(repo):
3945 '''Inform the user about the next action after completing hg resolve
3945 '''Inform the user about the next action after completing hg resolve
3946
3946
3947 If there's a matching afterresolvedstates, howtocontinue will yield
3947 If there's a matching afterresolvedstates, howtocontinue will yield
3948 repo.ui.warn as the reporter.
3948 repo.ui.warn as the reporter.
3949
3949
3950 Otherwise, it will yield repo.ui.note.
3950 Otherwise, it will yield repo.ui.note.
3951 '''
3951 '''
3952 msg, warning = howtocontinue(repo)
3952 msg, warning = howtocontinue(repo)
3953 if msg is not None:
3953 if msg is not None:
3954 if warning:
3954 if warning:
3955 repo.ui.warn("%s\n" % msg)
3955 repo.ui.warn("%s\n" % msg)
3956 else:
3956 else:
3957 repo.ui.note("%s\n" % msg)
3957 repo.ui.note("%s\n" % msg)
3958
3958
3959 def wrongtooltocontinue(repo, task):
3959 def wrongtooltocontinue(repo, task):
3960 '''Raise an abort suggesting how to properly continue if there is an
3960 '''Raise an abort suggesting how to properly continue if there is an
3961 active task.
3961 active task.
3962
3962
3963 Uses howtocontinue() to find the active task.
3963 Uses howtocontinue() to find the active task.
3964
3964
3965 If there's no task (i.e. the only suggestion would be 'hg commit'), it
3965 If there's no task (i.e. the only suggestion would be 'hg commit'), it
3966 does not offer a hint.
3966 does not offer a hint.
3967 '''
3967 '''
3968 after = howtocontinue(repo)
3968 after = howtocontinue(repo)
3969 hint = None
3969 hint = None
3970 if after[1]:
3970 if after[1]:
3971 hint = after[0]
3971 hint = after[0]
3972 raise error.Abort(_('no %s in progress') % task, hint=hint)
3972 raise error.Abort(_('no %s in progress') % task, hint=hint)
@@ -1,518 +1,520 b''
1 # utility for color output for Mercurial commands
1 # utility for color output for Mercurial commands
2 #
2 #
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com> and other
3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com> and other
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11
11
12 from .i18n import _
12 from .i18n import _
13
13
14 from . import (
14 from . import (
15 encoding,
15 encoding,
16 pycompat,
16 pycompat,
17 util
17 util
18 )
18 )
19
19
20 try:
20 try:
21 import curses
21 import curses
22 # Mapping from effect name to terminfo attribute name (or raw code) or
22 # Mapping from effect name to terminfo attribute name (or raw code) or
23 # color number. This will also force-load the curses module.
23 # color number. This will also force-load the curses module.
24 _baseterminfoparams = {
24 _baseterminfoparams = {
25 'none': (True, 'sgr0', ''),
25 'none': (True, 'sgr0', ''),
26 'standout': (True, 'smso', ''),
26 'standout': (True, 'smso', ''),
27 'underline': (True, 'smul', ''),
27 'underline': (True, 'smul', ''),
28 'reverse': (True, 'rev', ''),
28 'reverse': (True, 'rev', ''),
29 'inverse': (True, 'rev', ''),
29 'inverse': (True, 'rev', ''),
30 'blink': (True, 'blink', ''),
30 'blink': (True, 'blink', ''),
31 'dim': (True, 'dim', ''),
31 'dim': (True, 'dim', ''),
32 'bold': (True, 'bold', ''),
32 'bold': (True, 'bold', ''),
33 'invisible': (True, 'invis', ''),
33 'invisible': (True, 'invis', ''),
34 'italic': (True, 'sitm', ''),
34 'italic': (True, 'sitm', ''),
35 'black': (False, curses.COLOR_BLACK, ''),
35 'black': (False, curses.COLOR_BLACK, ''),
36 'red': (False, curses.COLOR_RED, ''),
36 'red': (False, curses.COLOR_RED, ''),
37 'green': (False, curses.COLOR_GREEN, ''),
37 'green': (False, curses.COLOR_GREEN, ''),
38 'yellow': (False, curses.COLOR_YELLOW, ''),
38 'yellow': (False, curses.COLOR_YELLOW, ''),
39 'blue': (False, curses.COLOR_BLUE, ''),
39 'blue': (False, curses.COLOR_BLUE, ''),
40 'magenta': (False, curses.COLOR_MAGENTA, ''),
40 'magenta': (False, curses.COLOR_MAGENTA, ''),
41 'cyan': (False, curses.COLOR_CYAN, ''),
41 'cyan': (False, curses.COLOR_CYAN, ''),
42 'white': (False, curses.COLOR_WHITE, ''),
42 'white': (False, curses.COLOR_WHITE, ''),
43 }
43 }
44 except ImportError:
44 except ImportError:
45 curses = None
45 curses = None
46 _baseterminfoparams = {}
46 _baseterminfoparams = {}
47
47
48 # start and stop parameters for effects
48 # start and stop parameters for effects
49 _effects = {
49 _effects = {
50 'none': 0,
50 'none': 0,
51 'black': 30,
51 'black': 30,
52 'red': 31,
52 'red': 31,
53 'green': 32,
53 'green': 32,
54 'yellow': 33,
54 'yellow': 33,
55 'blue': 34,
55 'blue': 34,
56 'magenta': 35,
56 'magenta': 35,
57 'cyan': 36,
57 'cyan': 36,
58 'white': 37,
58 'white': 37,
59 'bold': 1,
59 'bold': 1,
60 'italic': 3,
60 'italic': 3,
61 'underline': 4,
61 'underline': 4,
62 'inverse': 7,
62 'inverse': 7,
63 'dim': 2,
63 'dim': 2,
64 'black_background': 40,
64 'black_background': 40,
65 'red_background': 41,
65 'red_background': 41,
66 'green_background': 42,
66 'green_background': 42,
67 'yellow_background': 43,
67 'yellow_background': 43,
68 'blue_background': 44,
68 'blue_background': 44,
69 'purple_background': 45,
69 'purple_background': 45,
70 'cyan_background': 46,
70 'cyan_background': 46,
71 'white_background': 47,
71 'white_background': 47,
72 }
72 }
73
73
74 _defaultstyles = {
74 _defaultstyles = {
75 'grep.match': 'red bold',
75 'grep.match': 'red bold',
76 'grep.linenumber': 'green',
76 'grep.linenumber': 'green',
77 'grep.rev': 'green',
77 'grep.rev': 'green',
78 'grep.change': 'green',
78 'grep.change': 'green',
79 'grep.sep': 'cyan',
79 'grep.sep': 'cyan',
80 'grep.filename': 'magenta',
80 'grep.filename': 'magenta',
81 'grep.user': 'magenta',
81 'grep.user': 'magenta',
82 'grep.date': 'magenta',
82 'grep.date': 'magenta',
83 'bookmarks.active': 'green',
83 'bookmarks.active': 'green',
84 'branches.active': 'none',
84 'branches.active': 'none',
85 'branches.closed': 'black bold',
85 'branches.closed': 'black bold',
86 'branches.current': 'green',
86 'branches.current': 'green',
87 'branches.inactive': 'none',
87 'branches.inactive': 'none',
88 'diff.changed': 'white',
88 'diff.changed': 'white',
89 'diff.deleted': 'red',
89 'diff.deleted': 'red',
90 'diff.deleted.highlight': 'red bold underline',
90 'diff.diffline': 'bold',
91 'diff.diffline': 'bold',
91 'diff.extended': 'cyan bold',
92 'diff.extended': 'cyan bold',
92 'diff.file_a': 'red bold',
93 'diff.file_a': 'red bold',
93 'diff.file_b': 'green bold',
94 'diff.file_b': 'green bold',
94 'diff.hunk': 'magenta',
95 'diff.hunk': 'magenta',
95 'diff.inserted': 'green',
96 'diff.inserted': 'green',
97 'diff.inserted.highlight': 'green bold underline',
96 'diff.tab': '',
98 'diff.tab': '',
97 'diff.trailingwhitespace': 'bold red_background',
99 'diff.trailingwhitespace': 'bold red_background',
98 'changeset.public': '',
100 'changeset.public': '',
99 'changeset.draft': '',
101 'changeset.draft': '',
100 'changeset.secret': '',
102 'changeset.secret': '',
101 'diffstat.deleted': 'red',
103 'diffstat.deleted': 'red',
102 'diffstat.inserted': 'green',
104 'diffstat.inserted': 'green',
103 'histedit.remaining': 'red bold',
105 'histedit.remaining': 'red bold',
104 'ui.prompt': 'yellow',
106 'ui.prompt': 'yellow',
105 'log.changeset': 'yellow',
107 'log.changeset': 'yellow',
106 'patchbomb.finalsummary': '',
108 'patchbomb.finalsummary': '',
107 'patchbomb.from': 'magenta',
109 'patchbomb.from': 'magenta',
108 'patchbomb.to': 'cyan',
110 'patchbomb.to': 'cyan',
109 'patchbomb.subject': 'green',
111 'patchbomb.subject': 'green',
110 'patchbomb.diffstats': '',
112 'patchbomb.diffstats': '',
111 'rebase.rebased': 'blue',
113 'rebase.rebased': 'blue',
112 'rebase.remaining': 'red bold',
114 'rebase.remaining': 'red bold',
113 'resolve.resolved': 'green bold',
115 'resolve.resolved': 'green bold',
114 'resolve.unresolved': 'red bold',
116 'resolve.unresolved': 'red bold',
115 'shelve.age': 'cyan',
117 'shelve.age': 'cyan',
116 'shelve.newest': 'green bold',
118 'shelve.newest': 'green bold',
117 'shelve.name': 'blue bold',
119 'shelve.name': 'blue bold',
118 'status.added': 'green bold',
120 'status.added': 'green bold',
119 'status.clean': 'none',
121 'status.clean': 'none',
120 'status.copied': 'none',
122 'status.copied': 'none',
121 'status.deleted': 'cyan bold underline',
123 'status.deleted': 'cyan bold underline',
122 'status.ignored': 'black bold',
124 'status.ignored': 'black bold',
123 'status.modified': 'blue bold',
125 'status.modified': 'blue bold',
124 'status.removed': 'red bold',
126 'status.removed': 'red bold',
125 'status.unknown': 'magenta bold underline',
127 'status.unknown': 'magenta bold underline',
126 'tags.normal': 'green',
128 'tags.normal': 'green',
127 'tags.local': 'black bold',
129 'tags.local': 'black bold',
128 }
130 }
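# These defaults can be overridden from the [color] section of a
# configuration file, e.g. to change the new within-line diff highlighting:
#
#   [color]
#   diff.inserted.highlight = green_background
#   diff.deleted.highlight = red_background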
129
131
130 def loadcolortable(ui, extname, colortable):
132 def loadcolortable(ui, extname, colortable):
131 _defaultstyles.update(colortable)
133 _defaultstyles.update(colortable)
132
134
133 def _terminfosetup(ui, mode, formatted):
135 def _terminfosetup(ui, mode, formatted):
134 '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
136 '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
135
137
136 # If we failed to load curses, we go ahead and return.
138 # If we failed to load curses, we go ahead and return.
137 if curses is None:
139 if curses is None:
138 return
140 return
139 # Otherwise, see what the config file says.
141 # Otherwise, see what the config file says.
140 if mode not in ('auto', 'terminfo'):
142 if mode not in ('auto', 'terminfo'):
141 return
143 return
142 ui._terminfoparams.update(_baseterminfoparams)
144 ui._terminfoparams.update(_baseterminfoparams)
143
145
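# Custom entries come from the [color] section, e.g. (illustrative values):
#
#   [color]
#   color.brightyellow = 11
#   terminfo.dim = \E[2m
#
# where "color.NAME = N" maps NAME to terminal color number N and
# "terminfo.NAME = CODE" maps NAME to a raw escape sequence (\E stands for
# the escape character).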
144 for key, val in ui.configitems('color'):
146 for key, val in ui.configitems('color'):
145 if key.startswith('color.'):
147 if key.startswith('color.'):
146 newval = (False, int(val), '')
148 newval = (False, int(val), '')
147 ui._terminfoparams[key[6:]] = newval
149 ui._terminfoparams[key[6:]] = newval
148 elif key.startswith('terminfo.'):
150 elif key.startswith('terminfo.'):
149 newval = (True, '', val.replace('\\E', '\x1b'))
151 newval = (True, '', val.replace('\\E', '\x1b'))
150 ui._terminfoparams[key[9:]] = newval
152 ui._terminfoparams[key[9:]] = newval
151 try:
153 try:
152 curses.setupterm()
154 curses.setupterm()
153 except curses.error as e:
155 except curses.error as e:
154 ui._terminfoparams.clear()
156 ui._terminfoparams.clear()
155 return
157 return
156
158
157 for key, (b, e, c) in ui._terminfoparams.items():
159 for key, (b, e, c) in ui._terminfoparams.items():
158 if not b:
160 if not b:
159 continue
161 continue
160 if not c and not curses.tigetstr(e):
162 if not c and not curses.tigetstr(e):
161 # Most terminals don't support dim, invis, etc, so don't be
163 # Most terminals don't support dim, invis, etc, so don't be
162 # noisy and use ui.debug().
164 # noisy and use ui.debug().
163 ui.debug("no terminfo entry for %s\n" % e)
165 ui.debug("no terminfo entry for %s\n" % e)
164 del ui._terminfoparams[key]
166 del ui._terminfoparams[key]
165 if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
167 if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
166 # Only warn about missing terminfo entries if we explicitly asked for
168 # Only warn about missing terminfo entries if we explicitly asked for
167 # terminfo mode and we're in a formatted terminal.
169 # terminfo mode and we're in a formatted terminal.
168 if mode == "terminfo" and formatted:
170 if mode == "terminfo" and formatted:
169 ui.warn(_("no terminfo entry for setab/setaf: reverting to "
171 ui.warn(_("no terminfo entry for setab/setaf: reverting to "
170 "ECMA-48 color\n"))
172 "ECMA-48 color\n"))
171 ui._terminfoparams.clear()
173 ui._terminfoparams.clear()
172
174
173 def setup(ui):
175 def setup(ui):
174 """configure color on a ui
176 """configure color on a ui
175
177
176 This function both sets the colormode for the ui object and reads
178 This function both sets the colormode for the ui object and reads
177 the configuration looking for custom colors and effect definitions."""
179 the configuration looking for custom colors and effect definitions."""
178 mode = _modesetup(ui)
180 mode = _modesetup(ui)
179 ui._colormode = mode
181 ui._colormode = mode
180 if mode and mode != 'debug':
182 if mode and mode != 'debug':
181 configstyles(ui)
183 configstyles(ui)
182
184
183 def _modesetup(ui):
185 def _modesetup(ui):
184 if ui.plain('color'):
186 if ui.plain('color'):
185 return None
187 return None
186 config = ui.config('ui', 'color')
188 config = ui.config('ui', 'color')
187 if config == 'debug':
189 if config == 'debug':
188 return 'debug'
190 return 'debug'
189
191
190 auto = (config == 'auto')
192 auto = (config == 'auto')
191 always = False
193 always = False
192 if not auto and util.parsebool(config):
194 if not auto and util.parsebool(config):
193 # We want the config to behave like a boolean; "on" is actually auto,
195 # We want the config to behave like a boolean; "on" is actually auto,
194 # but the "always" value is treated as a special case to reduce confusion.
196 # but the "always" value is treated as a special case to reduce confusion.
195 if ui.configsource('ui', 'color') == '--color' or config == 'always':
197 if ui.configsource('ui', 'color') == '--color' or config == 'always':
196 always = True
198 always = True
197 else:
199 else:
198 auto = True
200 auto = True
199
201
200 if not always and not auto:
202 if not always and not auto:
201 return None
203 return None
202
204
203 formatted = (always or (encoding.environ.get('TERM') != 'dumb'
205 formatted = (always or (encoding.environ.get('TERM') != 'dumb'
204 and ui.formatted()))
206 and ui.formatted()))
205
207
206 mode = ui.config('color', 'mode')
208 mode = ui.config('color', 'mode')
207
209
208 # If pager is active, color.pagermode overrides color.mode.
210 # If pager is active, color.pagermode overrides color.mode.
209 if getattr(ui, 'pageractive', False):
211 if getattr(ui, 'pageractive', False):
210 mode = ui.config('color', 'pagermode', mode)
212 mode = ui.config('color', 'pagermode', mode)
211
213
212 realmode = mode
214 realmode = mode
213 if pycompat.iswindows:
215 if pycompat.iswindows:
214 from . import win32
216 from . import win32
215
217
216 term = encoding.environ.get('TERM')
218 term = encoding.environ.get('TERM')
217 # TERM won't be defined in a vanilla cmd.exe environment.
219 # TERM won't be defined in a vanilla cmd.exe environment.
218
220
219 # UNIX-like environments on Windows such as Cygwin and MSYS will
221 # UNIX-like environments on Windows such as Cygwin and MSYS will
220 # set TERM. They appear to make a best effort attempt at setting it
222 # set TERM. They appear to make a best effort attempt at setting it
221 # to something appropriate. However, not all environments with TERM
223 # to something appropriate. However, not all environments with TERM
222 # defined support ANSI.
224 # defined support ANSI.
223 ansienviron = term and 'xterm' in term
225 ansienviron = term and 'xterm' in term
224
226
225 if mode == 'auto':
227 if mode == 'auto':
226 # Since "ansi" could result in terminal gibberish, we error on the
228 # Since "ansi" could result in terminal gibberish, we error on the
227 # side of selecting "win32". However, if w32effects is not defined,
229 # side of selecting "win32". However, if w32effects is not defined,
228 # we almost certainly don't support "win32", so don't even try.
230 # we almost certainly don't support "win32", so don't even try.
229 # w32effects is not populated when stdout is redirected, so checking
231 # w32effects is not populated when stdout is redirected, so checking
230 # it first avoids win32 calls in a state known to error out.
232 # it first avoids win32 calls in a state known to error out.
231 if ansienviron or not w32effects or win32.enablevtmode():
233 if ansienviron or not w32effects or win32.enablevtmode():
232 realmode = 'ansi'
234 realmode = 'ansi'
233 else:
235 else:
234 realmode = 'win32'
236 realmode = 'win32'
235 # An empty w32effects is a clue that stdout is redirected, and thus
237 # An empty w32effects is a clue that stdout is redirected, and thus
236 # cannot enable VT mode.
238 # cannot enable VT mode.
237 elif mode == 'ansi' and w32effects and not ansienviron:
239 elif mode == 'ansi' and w32effects and not ansienviron:
238 win32.enablevtmode()
240 win32.enablevtmode()
239 elif mode == 'auto':
241 elif mode == 'auto':
240 realmode = 'ansi'
242 realmode = 'ansi'
241
243
242 def modewarn():
244 def modewarn():
243 # only warn if color.mode was explicitly set and we're in
245 # only warn if color.mode was explicitly set and we're in
244 # a formatted terminal
246 # a formatted terminal
245 if mode == realmode and formatted:
247 if mode == realmode and formatted:
246 ui.warn(_('warning: failed to set color mode to %s\n') % mode)
248 ui.warn(_('warning: failed to set color mode to %s\n') % mode)
247
249
248 if realmode == 'win32':
250 if realmode == 'win32':
249 ui._terminfoparams.clear()
251 ui._terminfoparams.clear()
250 if not w32effects:
252 if not w32effects:
251 modewarn()
253 modewarn()
252 return None
254 return None
253 elif realmode == 'ansi':
255 elif realmode == 'ansi':
254 ui._terminfoparams.clear()
256 ui._terminfoparams.clear()
255 elif realmode == 'terminfo':
257 elif realmode == 'terminfo':
256 _terminfosetup(ui, mode, formatted)
258 _terminfosetup(ui, mode, formatted)
257 if not ui._terminfoparams:
259 if not ui._terminfoparams:
258 ## FIXME Shouldn't we return None in this case too?
260 ## FIXME Shouldn't we return None in this case too?
259 modewarn()
261 modewarn()
260 realmode = 'ansi'
262 realmode = 'ansi'
261 else:
263 else:
262 return None
264 return None
263
265
264 if always or (auto and formatted):
266 if always or (auto and formatted):
265 return realmode
267 return realmode
266 return None
268 return None
267
269
268 def configstyles(ui):
270 def configstyles(ui):
269 ui._styles.update(_defaultstyles)
271 ui._styles.update(_defaultstyles)
270 for status, cfgeffects in ui.configitems('color'):
272 for status, cfgeffects in ui.configitems('color'):
271 if '.' not in status or status.startswith(('color.', 'terminfo.')):
273 if '.' not in status or status.startswith(('color.', 'terminfo.')):
272 continue
274 continue
273 cfgeffects = ui.configlist('color', status)
275 cfgeffects = ui.configlist('color', status)
274 if cfgeffects:
276 if cfgeffects:
275 good = []
277 good = []
276 for e in cfgeffects:
278 for e in cfgeffects:
277 if valideffect(ui, e):
279 if valideffect(ui, e):
278 good.append(e)
280 good.append(e)
279 else:
281 else:
280 ui.warn(_("ignoring unknown color/effect %r "
282 ui.warn(_("ignoring unknown color/effect %r "
281 "(configured in color.%s)\n")
283 "(configured in color.%s)\n")
282 % (e, status))
284 % (e, status))
283 ui._styles[status] = ' '.join(good)
285 ui._styles[status] = ' '.join(good)
284
286
285 def _activeeffects(ui):
287 def _activeeffects(ui):
286 '''Return the effects map for the color mode set on the ui.'''
288 '''Return the effects map for the color mode set on the ui.'''
287 if ui._colormode == 'win32':
289 if ui._colormode == 'win32':
288 return w32effects
290 return w32effects
289 elif ui._colormode is not None:
291 elif ui._colormode is not None:
290 return _effects
292 return _effects
291 return {}
293 return {}
292
294
293 def valideffect(ui, effect):
295 def valideffect(ui, effect):
294 'Determine if the effect is valid or not.'
296 'Determine if the effect is valid or not.'
295 return ((not ui._terminfoparams and effect in _activeeffects(ui))
297 return ((not ui._terminfoparams and effect in _activeeffects(ui))
296 or (effect in ui._terminfoparams
298 or (effect in ui._terminfoparams
297 or effect[:-11] in ui._terminfoparams))
299 or effect[:-11] in ui._terminfoparams))
298
300
299 def _effect_str(ui, effect):
301 def _effect_str(ui, effect):
300 '''Helper function for render_effects().'''
302 '''Helper function for render_effects().'''
301
303
302 bg = False
304 bg = False
303 if effect.endswith('_background'):
305 if effect.endswith('_background'):
304 bg = True
306 bg = True
305 effect = effect[:-11]
307 effect = effect[:-11]
306 try:
308 try:
307 attr, val, termcode = ui._terminfoparams[effect]
309 attr, val, termcode = ui._terminfoparams[effect]
308 except KeyError:
310 except KeyError:
309 return ''
311 return ''
310 if attr:
312 if attr:
311 if termcode:
313 if termcode:
312 return termcode
314 return termcode
313 else:
315 else:
314 return curses.tigetstr(val)
316 return curses.tigetstr(val)
315 elif bg:
317 elif bg:
316 return curses.tparm(curses.tigetstr('setab'), val)
318 return curses.tparm(curses.tigetstr('setab'), val)
317 else:
319 else:
318 return curses.tparm(curses.tigetstr('setaf'), val)
320 return curses.tparm(curses.tigetstr('setaf'), val)
319
321
320 def _mergeeffects(text, start, stop):
322 def _mergeeffects(text, start, stop):
321 """Insert start sequence at every occurrence of stop sequence
323 """Insert start sequence at every occurrence of stop sequence
322
324
323 >>> s = _mergeeffects(b'cyan', b'[C]', b'|')
325 >>> s = _mergeeffects(b'cyan', b'[C]', b'|')
324 >>> s = _mergeeffects(s + b'yellow', b'[Y]', b'|')
326 >>> s = _mergeeffects(s + b'yellow', b'[Y]', b'|')
325 >>> s = _mergeeffects(b'ma' + s + b'genta', b'[M]', b'|')
327 >>> s = _mergeeffects(b'ma' + s + b'genta', b'[M]', b'|')
326 >>> s = _mergeeffects(b'red' + s, b'[R]', b'|')
328 >>> s = _mergeeffects(b'red' + s, b'[R]', b'|')
327 >>> s
329 >>> s
328 '[R]red[M]ma[Y][C]cyan|[R][M][Y]yellow|[R][M]genta|'
330 '[R]red[M]ma[Y][C]cyan|[R][M][Y]yellow|[R][M]genta|'
329 """
331 """
330 parts = []
332 parts = []
331 for t in text.split(stop):
333 for t in text.split(stop):
332 if not t:
334 if not t:
333 continue
335 continue
334 parts.extend([start, t, stop])
336 parts.extend([start, t, stop])
335 return ''.join(parts)
337 return ''.join(parts)
336
338
337 def _render_effects(ui, text, effects):
339 def _render_effects(ui, text, effects):
338 'Wrap text in commands to turn on each effect.'
340 'Wrap text in commands to turn on each effect.'
339 if not text:
341 if not text:
340 return text
342 return text
341 if ui._terminfoparams:
343 if ui._terminfoparams:
342 start = ''.join(_effect_str(ui, effect)
344 start = ''.join(_effect_str(ui, effect)
343 for effect in ['none'] + effects.split())
345 for effect in ['none'] + effects.split())
344 stop = _effect_str(ui, 'none')
346 stop = _effect_str(ui, 'none')
345 else:
347 else:
346 activeeffects = _activeeffects(ui)
348 activeeffects = _activeeffects(ui)
347 start = [pycompat.bytestr(activeeffects[e])
349 start = [pycompat.bytestr(activeeffects[e])
348 for e in ['none'] + effects.split()]
350 for e in ['none'] + effects.split()]
349 start = '\033[' + ';'.join(start) + 'm'
351 start = '\033[' + ';'.join(start) + 'm'
350 stop = '\033[' + pycompat.bytestr(activeeffects['none']) + 'm'
352 stop = '\033[' + pycompat.bytestr(activeeffects['none']) + 'm'
351 return _mergeeffects(text, start, stop)
353 return _mergeeffects(text, start, stop)
352
354
353 _ansieffectre = re.compile(br'\x1b\[[0-9;]*m')
355 _ansieffectre = re.compile(br'\x1b\[[0-9;]*m')
354
356
355 def stripeffects(text):
357 def stripeffects(text):
356 """Strip ANSI control codes which could be inserted by colorlabel()"""
358 """Strip ANSI control codes which could be inserted by colorlabel()"""
357 return _ansieffectre.sub('', text)
359 return _ansieffectre.sub('', text)
358
360
359 def colorlabel(ui, msg, label):
361 def colorlabel(ui, msg, label):
360 """add color control code according to the mode"""
362 """add color control code according to the mode"""
361 if ui._colormode == 'debug':
363 if ui._colormode == 'debug':
362 if label and msg:
364 if label and msg:
363 if msg[-1] == '\n':
365 if msg[-1] == '\n':
364 msg = "[%s|%s]\n" % (label, msg[:-1])
366 msg = "[%s|%s]\n" % (label, msg[:-1])
365 else:
367 else:
366 msg = "[%s|%s]" % (label, msg)
368 msg = "[%s|%s]" % (label, msg)
367 elif ui._colormode is not None:
369 elif ui._colormode is not None:
368 effects = []
370 effects = []
369 for l in label.split():
371 for l in label.split():
370 s = ui._styles.get(l, '')
372 s = ui._styles.get(l, '')
371 if s:
373 if s:
372 effects.append(s)
374 effects.append(s)
373 elif valideffect(ui, l):
375 elif valideffect(ui, l):
374 effects.append(l)
376 effects.append(l)
375 effects = ' '.join(effects)
377 effects = ' '.join(effects)
376 if effects:
378 if effects:
377 msg = '\n'.join([_render_effects(ui, line, effects)
379 msg = '\n'.join([_render_effects(ui, line, effects)
378 for line in msg.split('\n')])
380 for line in msg.split('\n')])
379 return msg
381 return msg
380
382
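
For the 'debug' mode branch above, the transformation is easiest to see on a concrete message; this is a minimal standalone sketch of that branch only, not the hg API itself:

    def debuglabel(msg, label):
        # mirror of the ui._colormode == 'debug' case: wrap the message in [label|...]
        if msg.endswith('\n'):
            return '[%s|%s]\n' % (label, msg[:-1])
        return '[%s|%s]' % (label, msg)

    print(debuglabel('abort: no repository found\n', 'ui.error'))
    # [ui.error|abort: no repository found]
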
381 w32effects = None
383 w32effects = None
382 if pycompat.iswindows:
384 if pycompat.iswindows:
383 import ctypes
385 import ctypes
384
386
385 _kernel32 = ctypes.windll.kernel32
387 _kernel32 = ctypes.windll.kernel32
386
388
387 _WORD = ctypes.c_ushort
389 _WORD = ctypes.c_ushort
388
390
389 _INVALID_HANDLE_VALUE = -1
391 _INVALID_HANDLE_VALUE = -1
390
392
391 class _COORD(ctypes.Structure):
393 class _COORD(ctypes.Structure):
392 _fields_ = [('X', ctypes.c_short),
394 _fields_ = [('X', ctypes.c_short),
393 ('Y', ctypes.c_short)]
395 ('Y', ctypes.c_short)]
394
396
395 class _SMALL_RECT(ctypes.Structure):
397 class _SMALL_RECT(ctypes.Structure):
396 _fields_ = [('Left', ctypes.c_short),
398 _fields_ = [('Left', ctypes.c_short),
397 ('Top', ctypes.c_short),
399 ('Top', ctypes.c_short),
398 ('Right', ctypes.c_short),
400 ('Right', ctypes.c_short),
399 ('Bottom', ctypes.c_short)]
401 ('Bottom', ctypes.c_short)]
400
402
401 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
403 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
402 _fields_ = [('dwSize', _COORD),
404 _fields_ = [('dwSize', _COORD),
403 ('dwCursorPosition', _COORD),
405 ('dwCursorPosition', _COORD),
404 ('wAttributes', _WORD),
406 ('wAttributes', _WORD),
405 ('srWindow', _SMALL_RECT),
407 ('srWindow', _SMALL_RECT),
406 ('dwMaximumWindowSize', _COORD)]
408 ('dwMaximumWindowSize', _COORD)]
407
409
408 _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
410 _STD_OUTPUT_HANDLE = 0xfffffff5 # (DWORD)-11
409 _STD_ERROR_HANDLE = 0xfffffff4 # (DWORD)-12
411 _STD_ERROR_HANDLE = 0xfffffff4 # (DWORD)-12
410
412
411 _FOREGROUND_BLUE = 0x0001
413 _FOREGROUND_BLUE = 0x0001
412 _FOREGROUND_GREEN = 0x0002
414 _FOREGROUND_GREEN = 0x0002
413 _FOREGROUND_RED = 0x0004
415 _FOREGROUND_RED = 0x0004
414 _FOREGROUND_INTENSITY = 0x0008
416 _FOREGROUND_INTENSITY = 0x0008
415
417
416 _BACKGROUND_BLUE = 0x0010
418 _BACKGROUND_BLUE = 0x0010
417 _BACKGROUND_GREEN = 0x0020
419 _BACKGROUND_GREEN = 0x0020
418 _BACKGROUND_RED = 0x0040
420 _BACKGROUND_RED = 0x0040
419 _BACKGROUND_INTENSITY = 0x0080
421 _BACKGROUND_INTENSITY = 0x0080
420
422
421 _COMMON_LVB_REVERSE_VIDEO = 0x4000
423 _COMMON_LVB_REVERSE_VIDEO = 0x4000
422 _COMMON_LVB_UNDERSCORE = 0x8000
424 _COMMON_LVB_UNDERSCORE = 0x8000
423
425
424 # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
426 # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
425 w32effects = {
427 w32effects = {
426 'none': -1,
428 'none': -1,
427 'black': 0,
429 'black': 0,
428 'red': _FOREGROUND_RED,
430 'red': _FOREGROUND_RED,
429 'green': _FOREGROUND_GREEN,
431 'green': _FOREGROUND_GREEN,
430 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
432 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
431 'blue': _FOREGROUND_BLUE,
433 'blue': _FOREGROUND_BLUE,
432 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
434 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
433 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
435 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
434 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
436 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
435 'bold': _FOREGROUND_INTENSITY,
437 'bold': _FOREGROUND_INTENSITY,
436 'black_background': 0x100, # unused value > 0x0f
438 'black_background': 0x100, # unused value > 0x0f
437 'red_background': _BACKGROUND_RED,
439 'red_background': _BACKGROUND_RED,
438 'green_background': _BACKGROUND_GREEN,
440 'green_background': _BACKGROUND_GREEN,
439 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
441 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
440 'blue_background': _BACKGROUND_BLUE,
442 'blue_background': _BACKGROUND_BLUE,
441 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
443 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
442 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
444 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
443 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
445 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
444 _BACKGROUND_BLUE),
446 _BACKGROUND_BLUE),
445 'bold_background': _BACKGROUND_INTENSITY,
447 'bold_background': _BACKGROUND_INTENSITY,
446 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
448 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
447 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
449 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
448 }
450 }
449
451
450 passthrough = {_FOREGROUND_INTENSITY,
452 passthrough = {_FOREGROUND_INTENSITY,
451 _BACKGROUND_INTENSITY,
453 _BACKGROUND_INTENSITY,
452 _COMMON_LVB_UNDERSCORE,
454 _COMMON_LVB_UNDERSCORE,
453 _COMMON_LVB_REVERSE_VIDEO}
455 _COMMON_LVB_REVERSE_VIDEO}
454
456
455 stdout = _kernel32.GetStdHandle(
457 stdout = _kernel32.GetStdHandle(
456 _STD_OUTPUT_HANDLE) # don't close the handle returned
458 _STD_OUTPUT_HANDLE) # don't close the handle returned
457 if stdout is None or stdout == _INVALID_HANDLE_VALUE:
459 if stdout is None or stdout == _INVALID_HANDLE_VALUE:
458 w32effects = None
460 w32effects = None
459 else:
461 else:
460 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
462 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
461 if not _kernel32.GetConsoleScreenBufferInfo(
463 if not _kernel32.GetConsoleScreenBufferInfo(
462 stdout, ctypes.byref(csbi)):
464 stdout, ctypes.byref(csbi)):
463 # stdout may not support GetConsoleScreenBufferInfo()
465 # stdout may not support GetConsoleScreenBufferInfo()
464 # when called from subprocess or redirected
466 # when called from subprocess or redirected
465 w32effects = None
467 w32effects = None
466 else:
468 else:
467 origattr = csbi.wAttributes
469 origattr = csbi.wAttributes
468 ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
470 ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
469 re.MULTILINE | re.DOTALL)
471 re.MULTILINE | re.DOTALL)
470
472
471 def win32print(ui, writefunc, *msgs, **opts):
473 def win32print(ui, writefunc, *msgs, **opts):
472 for text in msgs:
474 for text in msgs:
473 _win32print(ui, text, writefunc, **opts)
475 _win32print(ui, text, writefunc, **opts)
474
476
475 def _win32print(ui, text, writefunc, **opts):
477 def _win32print(ui, text, writefunc, **opts):
476 label = opts.get('label', '')
478 label = opts.get('label', '')
477 attr = origattr
479 attr = origattr
478
480
479 def mapcolor(val, attr):
481 def mapcolor(val, attr):
480 if val == -1:
482 if val == -1:
481 return origattr
483 return origattr
482 elif val in passthrough:
484 elif val in passthrough:
483 return attr | val
485 return attr | val
484 elif val > 0x0f:
486 elif val > 0x0f:
485 return (val & 0x70) | (attr & 0x8f)
487 return (val & 0x70) | (attr & 0x8f)
486 else:
488 else:
487 return (val & 0x07) | (attr & 0xf8)
489 return (val & 0x07) | (attr & 0xf8)
488
490
489 # determine console attributes based on labels
491 # determine console attributes based on labels
490 for l in label.split():
492 for l in label.split():
491 style = ui._styles.get(l, '')
493 style = ui._styles.get(l, '')
492 for effect in style.split():
494 for effect in style.split():
493 try:
495 try:
494 attr = mapcolor(w32effects[effect], attr)
496 attr = mapcolor(w32effects[effect], attr)
495 except KeyError:
497 except KeyError:
496 # w32effects may not have certain attributes, so we skip
498 # w32effects may not have certain attributes, so we skip
497 # them if not found
499 # them if not found
498 pass
500 pass
499 # hack to ensure regexp finds data
501 # hack to ensure regexp finds data
500 if not text.startswith('\033['):
502 if not text.startswith('\033['):
501 text = '\033[m' + text
503 text = '\033[m' + text
502
504
503 # Look for ANSI-like codes embedded in text
505 # Look for ANSI-like codes embedded in text
504 m = re.match(ansire, text)
506 m = re.match(ansire, text)
505
507
506 try:
508 try:
507 while m:
509 while m:
508 for sattr in m.group(1).split(';'):
510 for sattr in m.group(1).split(';'):
509 if sattr:
511 if sattr:
510 attr = mapcolor(int(sattr), attr)
512 attr = mapcolor(int(sattr), attr)
511 ui.flush()
513 ui.flush()
512 _kernel32.SetConsoleTextAttribute(stdout, attr)
514 _kernel32.SetConsoleTextAttribute(stdout, attr)
513 writefunc(m.group(2), **opts)
515 writefunc(m.group(2), **opts)
514 m = re.match(ansire, m.group(3))
516 m = re.match(ansire, m.group(3))
515 finally:
517 finally:
516 # Explicitly reset original attributes
518 # Explicitly reset original attributes
517 ui.flush()
519 ui.flush()
518 _kernel32.SetConsoleTextAttribute(stdout, origattr)
520 _kernel32.SetConsoleTextAttribute(stdout, origattr)
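
On Windows the colors are not escape sequences but bit fields in the console attribute word, so mapcolor() above splices the requested value into the right bits while leaving the rest of the attribute untouched. A simplified standalone sketch of that masking (the passthrough set for intensity/underline/reverse is omitted; the constants are the ones defined above, and the saved original attribute is assumed here to be white-on-black):

    FOREGROUND_RED = 0x0004
    BACKGROUND_GREEN = 0x0020
    ORIGATTR = 0x07            # assumed saved original attributes: white text, black background

    def mapcolor(val, attr):
        if val == -1:                          # 'none' restores the saved attributes
            return ORIGATTR
        elif val > 0x0f:                       # background colors live in bits 4-6
            return (val & 0x70) | (attr & 0x8f)
        else:                                  # foreground colors live in bits 0-2
            return (val & 0x07) | (attr & 0xf8)

    attr = mapcolor(FOREGROUND_RED, ORIGATTR)   # white text -> red text
    attr = mapcolor(BACKGROUND_GREEN, attr)     # keep red text, add green background
    print(hex(attr))                            # 0x24
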
@@ -1,1271 +1,1274 b''
1 # configitems.py - centralized declaration of configuration option
1 # configitems.py - centralized declaration of configuration option
2 #
2 #
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import functools
10 import functools
11 import re
11 import re
12
12
13 from . import (
13 from . import (
14 encoding,
14 encoding,
15 error,
15 error,
16 )
16 )
17
17
18 def loadconfigtable(ui, extname, configtable):
18 def loadconfigtable(ui, extname, configtable):
19 """update config item known to the ui with the extension ones"""
19 """update config item known to the ui with the extension ones"""
20 for section, items in configtable.items():
20 for section, items in configtable.items():
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
21 knownitems = ui._knownconfig.setdefault(section, itemregister())
22 knownkeys = set(knownitems)
22 knownkeys = set(knownitems)
23 newkeys = set(items)
23 newkeys = set(items)
24 for key in sorted(knownkeys & newkeys):
24 for key in sorted(knownkeys & newkeys):
25 msg = "extension '%s' overwrite config item '%s.%s'"
25 msg = "extension '%s' overwrite config item '%s.%s'"
26 msg %= (extname, section, key)
26 msg %= (extname, section, key)
27 ui.develwarn(msg, config='warn-config')
27 ui.develwarn(msg, config='warn-config')
28
28
29 knownitems.update(items)
29 knownitems.update(items)
30
30
31 class configitem(object):
31 class configitem(object):
32 """represent a known config item
32 """represent a known config item
33
33
34 :section: the official config section where to find this item,
34 :section: the official config section where to find this item,
35 :name: the official name within the section,
35 :name: the official name within the section,
36 :default: default value for this item,
36 :default: default value for this item,
37 :alias: optional list of tuples as alternatives,
37 :alias: optional list of tuples as alternatives,
38 :generic: this is a generic definition, match name using regular expression.
38 :generic: this is a generic definition, match name using regular expression.
39 """
39 """
40
40
41 def __init__(self, section, name, default=None, alias=(),
41 def __init__(self, section, name, default=None, alias=(),
42 generic=False, priority=0):
42 generic=False, priority=0):
43 self.section = section
43 self.section = section
44 self.name = name
44 self.name = name
45 self.default = default
45 self.default = default
46 self.alias = list(alias)
46 self.alias = list(alias)
47 self.generic = generic
47 self.generic = generic
48 self.priority = priority
48 self.priority = priority
49 self._re = None
49 self._re = None
50 if generic:
50 if generic:
51 self._re = re.compile(self.name)
51 self._re = re.compile(self.name)
52
52
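
A small illustration of the two kinds of items this class describes, exercising the definition above directly (the section and name values are borrowed from registrations that appear later in this file):

    plain = configitem('color', 'mode', default='auto')
    wildcard = configitem('pager', 'attend-.*', default=None, generic=True)
    print(plain.generic, wildcard.generic)             # False True
    print(bool(wildcard._re.match('attend-log')))      # True
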
53 class itemregister(dict):
53 class itemregister(dict):
54 """A specialized dictionary that can handle wild-card selection"""
54 """A specialized dictionary that can handle wild-card selection"""
55
55
56 def __init__(self):
56 def __init__(self):
57 super(itemregister, self).__init__()
57 super(itemregister, self).__init__()
58 self._generics = set()
58 self._generics = set()
59
59
60 def update(self, other):
60 def update(self, other):
61 super(itemregister, self).update(other)
61 super(itemregister, self).update(other)
62 self._generics.update(other._generics)
62 self._generics.update(other._generics)
63
63
64 def __setitem__(self, key, item):
64 def __setitem__(self, key, item):
65 super(itemregister, self).__setitem__(key, item)
65 super(itemregister, self).__setitem__(key, item)
66 if item.generic:
66 if item.generic:
67 self._generics.add(item)
67 self._generics.add(item)
68
68
69 def get(self, key):
69 def get(self, key):
70 baseitem = super(itemregister, self).get(key)
70 baseitem = super(itemregister, self).get(key)
71 if baseitem is not None and not baseitem.generic:
71 if baseitem is not None and not baseitem.generic:
72 return baseitem
72 return baseitem
73
73
74 # search for a matching generic item
74 # search for a matching generic item
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
75 generics = sorted(self._generics, key=(lambda x: (x.priority, x.name)))
76 for item in generics:
76 for item in generics:
77 # we use 'match' instead of 'search' to make the matching simpler
77 # we use 'match' instead of 'search' to make the matching simpler
78 # for people unfamiliar with regular expressions. Having the match
78 # for people unfamiliar with regular expressions. Having the match
79 # rooted to the start of the string produces a less surprising
79 # rooted to the start of the string produces a less surprising
80 # result for users writing a simple regex for a sub-attribute.
80 # result for users writing a simple regex for a sub-attribute.
81 #
81 #
82 # For example, using "color\..*" with match produces an unsurprising
82 # For example, using "color\..*" with match produces an unsurprising
83 # result, while using search could suddenly match apparently
83 # result, while using search could suddenly match apparently
84 # unrelated configuration that happens to contain "color."
84 # unrelated configuration that happens to contain "color."
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
85 # anywhere. This is a tradeoff where we favor requiring ".*" on
86 # some matches to avoid the need to prefix most patterns with "^".
86 # some matches to avoid the need to prefix most patterns with "^".
87 # The "^" seems more error-prone.
87 # The "^" seems more error-prone.
88 if item._re.match(key):
88 if item._re.match(key):
89 return item
89 return item
90
90
91 return None
91 return None
92
92
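
Putting the pieces together, the lookup above is "exact name first, then generic patterns in priority order". A rough self-contained sketch of that behaviour (simplified stand-ins rather than the real classes; the item names mirror the 'pager' registrations later in this file):

    import re

    class Item(object):
        def __init__(self, name, generic=False, priority=0):
            self.name, self.generic, self.priority = name, generic, priority
            self._re = re.compile(name) if generic else None

    items = {
        'ignore': Item('ignore'),
        'attend-.*': Item('attend-.*', generic=True),
    }

    def lookup(key):
        exact = items.get(key)
        if exact is not None and not exact.generic:
            return exact
        generics = sorted((i for i in items.values() if i.generic),
                          key=lambda i: (i.priority, i.name))
        for item in generics:
            if item._re.match(key):        # match() anchors at the start, as explained above
                return item
        return None

    print(lookup('ignore').name)           # 'ignore'      (exact entry wins)
    print(lookup('attend-log').name)       # 'attend-.*'   (falls back to the generic pattern)
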
93 coreitems = {}
93 coreitems = {}
94
94
95 def _register(configtable, *args, **kwargs):
95 def _register(configtable, *args, **kwargs):
96 item = configitem(*args, **kwargs)
96 item = configitem(*args, **kwargs)
97 section = configtable.setdefault(item.section, itemregister())
97 section = configtable.setdefault(item.section, itemregister())
98 if item.name in section:
98 if item.name in section:
99 msg = "duplicated config item registration for '%s.%s'"
99 msg = "duplicated config item registration for '%s.%s'"
100 raise error.ProgrammingError(msg % (item.section, item.name))
100 raise error.ProgrammingError(msg % (item.section, item.name))
101 section[item.name] = item
101 section[item.name] = item
102
102
103 # special value for case where the default is derived from other values
103 # special value for case where the default is derived from other values
104 dynamicdefault = object()
104 dynamicdefault = object()
105
105
106 # Registering actual config items
106 # Registering actual config items
107
107
108 def getitemregister(configtable):
108 def getitemregister(configtable):
109 f = functools.partial(_register, configtable)
109 f = functools.partial(_register, configtable)
110 # export pseudo enum as configitem.*
110 # export pseudo enum as configitem.*
111 f.dynamicdefault = dynamicdefault
111 f.dynamicdefault = dynamicdefault
112 return f
112 return f
113
113
114 coreconfigitem = getitemregister(coreitems)
114 coreconfigitem = getitemregister(coreitems)
115
115
116 coreconfigitem('alias', '.*',
116 coreconfigitem('alias', '.*',
117 default=None,
117 default=None,
118 generic=True,
118 generic=True,
119 )
119 )
120 coreconfigitem('annotate', 'nodates',
120 coreconfigitem('annotate', 'nodates',
121 default=False,
121 default=False,
122 )
122 )
123 coreconfigitem('annotate', 'showfunc',
123 coreconfigitem('annotate', 'showfunc',
124 default=False,
124 default=False,
125 )
125 )
126 coreconfigitem('annotate', 'unified',
126 coreconfigitem('annotate', 'unified',
127 default=None,
127 default=None,
128 )
128 )
129 coreconfigitem('annotate', 'git',
129 coreconfigitem('annotate', 'git',
130 default=False,
130 default=False,
131 )
131 )
132 coreconfigitem('annotate', 'ignorews',
132 coreconfigitem('annotate', 'ignorews',
133 default=False,
133 default=False,
134 )
134 )
135 coreconfigitem('annotate', 'ignorewsamount',
135 coreconfigitem('annotate', 'ignorewsamount',
136 default=False,
136 default=False,
137 )
137 )
138 coreconfigitem('annotate', 'ignoreblanklines',
138 coreconfigitem('annotate', 'ignoreblanklines',
139 default=False,
139 default=False,
140 )
140 )
141 coreconfigitem('annotate', 'ignorewseol',
141 coreconfigitem('annotate', 'ignorewseol',
142 default=False,
142 default=False,
143 )
143 )
144 coreconfigitem('annotate', 'nobinary',
144 coreconfigitem('annotate', 'nobinary',
145 default=False,
145 default=False,
146 )
146 )
147 coreconfigitem('annotate', 'noprefix',
147 coreconfigitem('annotate', 'noprefix',
148 default=False,
148 default=False,
149 )
149 )
150 coreconfigitem('auth', 'cookiefile',
150 coreconfigitem('auth', 'cookiefile',
151 default=None,
151 default=None,
152 )
152 )
153 # bookmarks.pushing: internal hack for discovery
153 # bookmarks.pushing: internal hack for discovery
154 coreconfigitem('bookmarks', 'pushing',
154 coreconfigitem('bookmarks', 'pushing',
155 default=list,
155 default=list,
156 )
156 )
157 # bundle.mainreporoot: internal hack for bundlerepo
157 # bundle.mainreporoot: internal hack for bundlerepo
158 coreconfigitem('bundle', 'mainreporoot',
158 coreconfigitem('bundle', 'mainreporoot',
159 default='',
159 default='',
160 )
160 )
161 # bundle.reorder: experimental config
161 # bundle.reorder: experimental config
162 coreconfigitem('bundle', 'reorder',
162 coreconfigitem('bundle', 'reorder',
163 default='auto',
163 default='auto',
164 )
164 )
165 coreconfigitem('censor', 'policy',
165 coreconfigitem('censor', 'policy',
166 default='abort',
166 default='abort',
167 )
167 )
168 coreconfigitem('chgserver', 'idletimeout',
168 coreconfigitem('chgserver', 'idletimeout',
169 default=3600,
169 default=3600,
170 )
170 )
171 coreconfigitem('chgserver', 'skiphash',
171 coreconfigitem('chgserver', 'skiphash',
172 default=False,
172 default=False,
173 )
173 )
174 coreconfigitem('cmdserver', 'log',
174 coreconfigitem('cmdserver', 'log',
175 default=None,
175 default=None,
176 )
176 )
177 coreconfigitem('color', '.*',
177 coreconfigitem('color', '.*',
178 default=None,
178 default=None,
179 generic=True,
179 generic=True,
180 )
180 )
181 coreconfigitem('color', 'mode',
181 coreconfigitem('color', 'mode',
182 default='auto',
182 default='auto',
183 )
183 )
184 coreconfigitem('color', 'pagermode',
184 coreconfigitem('color', 'pagermode',
185 default=dynamicdefault,
185 default=dynamicdefault,
186 )
186 )
187 coreconfigitem('commands', 'show.aliasprefix',
187 coreconfigitem('commands', 'show.aliasprefix',
188 default=list,
188 default=list,
189 )
189 )
190 coreconfigitem('commands', 'status.relative',
190 coreconfigitem('commands', 'status.relative',
191 default=False,
191 default=False,
192 )
192 )
193 coreconfigitem('commands', 'status.skipstates',
193 coreconfigitem('commands', 'status.skipstates',
194 default=[],
194 default=[],
195 )
195 )
196 coreconfigitem('commands', 'status.verbose',
196 coreconfigitem('commands', 'status.verbose',
197 default=False,
197 default=False,
198 )
198 )
199 coreconfigitem('commands', 'update.check',
199 coreconfigitem('commands', 'update.check',
200 default=None,
200 default=None,
201 # Deprecated, remove after 4.4 release
201 # Deprecated, remove after 4.4 release
202 alias=[('experimental', 'updatecheck')]
202 alias=[('experimental', 'updatecheck')]
203 )
203 )
204 coreconfigitem('commands', 'update.requiredest',
204 coreconfigitem('commands', 'update.requiredest',
205 default=False,
205 default=False,
206 )
206 )
207 coreconfigitem('committemplate', '.*',
207 coreconfigitem('committemplate', '.*',
208 default=None,
208 default=None,
209 generic=True,
209 generic=True,
210 )
210 )
211 coreconfigitem('convert', 'cvsps.cache',
211 coreconfigitem('convert', 'cvsps.cache',
212 default=True,
212 default=True,
213 )
213 )
214 coreconfigitem('convert', 'cvsps.fuzz',
214 coreconfigitem('convert', 'cvsps.fuzz',
215 default=60,
215 default=60,
216 )
216 )
217 coreconfigitem('convert', 'cvsps.logencoding',
217 coreconfigitem('convert', 'cvsps.logencoding',
218 default=None,
218 default=None,
219 )
219 )
220 coreconfigitem('convert', 'cvsps.mergefrom',
220 coreconfigitem('convert', 'cvsps.mergefrom',
221 default=None,
221 default=None,
222 )
222 )
223 coreconfigitem('convert', 'cvsps.mergeto',
223 coreconfigitem('convert', 'cvsps.mergeto',
224 default=None,
224 default=None,
225 )
225 )
226 coreconfigitem('convert', 'git.committeractions',
226 coreconfigitem('convert', 'git.committeractions',
227 default=lambda: ['messagedifferent'],
227 default=lambda: ['messagedifferent'],
228 )
228 )
229 coreconfigitem('convert', 'git.extrakeys',
229 coreconfigitem('convert', 'git.extrakeys',
230 default=list,
230 default=list,
231 )
231 )
232 coreconfigitem('convert', 'git.findcopiesharder',
232 coreconfigitem('convert', 'git.findcopiesharder',
233 default=False,
233 default=False,
234 )
234 )
235 coreconfigitem('convert', 'git.remoteprefix',
235 coreconfigitem('convert', 'git.remoteprefix',
236 default='remote',
236 default='remote',
237 )
237 )
238 coreconfigitem('convert', 'git.renamelimit',
238 coreconfigitem('convert', 'git.renamelimit',
239 default=400,
239 default=400,
240 )
240 )
241 coreconfigitem('convert', 'git.saverev',
241 coreconfigitem('convert', 'git.saverev',
242 default=True,
242 default=True,
243 )
243 )
244 coreconfigitem('convert', 'git.similarity',
244 coreconfigitem('convert', 'git.similarity',
245 default=50,
245 default=50,
246 )
246 )
247 coreconfigitem('convert', 'git.skipsubmodules',
247 coreconfigitem('convert', 'git.skipsubmodules',
248 default=False,
248 default=False,
249 )
249 )
250 coreconfigitem('convert', 'hg.clonebranches',
250 coreconfigitem('convert', 'hg.clonebranches',
251 default=False,
251 default=False,
252 )
252 )
253 coreconfigitem('convert', 'hg.ignoreerrors',
253 coreconfigitem('convert', 'hg.ignoreerrors',
254 default=False,
254 default=False,
255 )
255 )
256 coreconfigitem('convert', 'hg.revs',
256 coreconfigitem('convert', 'hg.revs',
257 default=None,
257 default=None,
258 )
258 )
259 coreconfigitem('convert', 'hg.saverev',
259 coreconfigitem('convert', 'hg.saverev',
260 default=False,
260 default=False,
261 )
261 )
262 coreconfigitem('convert', 'hg.sourcename',
262 coreconfigitem('convert', 'hg.sourcename',
263 default=None,
263 default=None,
264 )
264 )
265 coreconfigitem('convert', 'hg.startrev',
265 coreconfigitem('convert', 'hg.startrev',
266 default=None,
266 default=None,
267 )
267 )
268 coreconfigitem('convert', 'hg.tagsbranch',
268 coreconfigitem('convert', 'hg.tagsbranch',
269 default='default',
269 default='default',
270 )
270 )
271 coreconfigitem('convert', 'hg.usebranchnames',
271 coreconfigitem('convert', 'hg.usebranchnames',
272 default=True,
272 default=True,
273 )
273 )
274 coreconfigitem('convert', 'ignoreancestorcheck',
274 coreconfigitem('convert', 'ignoreancestorcheck',
275 default=False,
275 default=False,
276 )
276 )
277 coreconfigitem('convert', 'localtimezone',
277 coreconfigitem('convert', 'localtimezone',
278 default=False,
278 default=False,
279 )
279 )
280 coreconfigitem('convert', 'p4.encoding',
280 coreconfigitem('convert', 'p4.encoding',
281 default=dynamicdefault,
281 default=dynamicdefault,
282 )
282 )
283 coreconfigitem('convert', 'p4.startrev',
283 coreconfigitem('convert', 'p4.startrev',
284 default=0,
284 default=0,
285 )
285 )
286 coreconfigitem('convert', 'skiptags',
286 coreconfigitem('convert', 'skiptags',
287 default=False,
287 default=False,
288 )
288 )
289 coreconfigitem('convert', 'svn.debugsvnlog',
289 coreconfigitem('convert', 'svn.debugsvnlog',
290 default=True,
290 default=True,
291 )
291 )
292 coreconfigitem('convert', 'svn.trunk',
292 coreconfigitem('convert', 'svn.trunk',
293 default=None,
293 default=None,
294 )
294 )
295 coreconfigitem('convert', 'svn.tags',
295 coreconfigitem('convert', 'svn.tags',
296 default=None,
296 default=None,
297 )
297 )
298 coreconfigitem('convert', 'svn.branches',
298 coreconfigitem('convert', 'svn.branches',
299 default=None,
299 default=None,
300 )
300 )
301 coreconfigitem('convert', 'svn.startrev',
301 coreconfigitem('convert', 'svn.startrev',
302 default=0,
302 default=0,
303 )
303 )
304 coreconfigitem('debug', 'dirstate.delaywrite',
304 coreconfigitem('debug', 'dirstate.delaywrite',
305 default=0,
305 default=0,
306 )
306 )
307 coreconfigitem('defaults', '.*',
307 coreconfigitem('defaults', '.*',
308 default=None,
308 default=None,
309 generic=True,
309 generic=True,
310 )
310 )
311 coreconfigitem('devel', 'all-warnings',
311 coreconfigitem('devel', 'all-warnings',
312 default=False,
312 default=False,
313 )
313 )
314 coreconfigitem('devel', 'bundle2.debug',
314 coreconfigitem('devel', 'bundle2.debug',
315 default=False,
315 default=False,
316 )
316 )
317 coreconfigitem('devel', 'cache-vfs',
317 coreconfigitem('devel', 'cache-vfs',
318 default=None,
318 default=None,
319 )
319 )
320 coreconfigitem('devel', 'check-locks',
320 coreconfigitem('devel', 'check-locks',
321 default=False,
321 default=False,
322 )
322 )
323 coreconfigitem('devel', 'check-relroot',
323 coreconfigitem('devel', 'check-relroot',
324 default=False,
324 default=False,
325 )
325 )
326 coreconfigitem('devel', 'default-date',
326 coreconfigitem('devel', 'default-date',
327 default=None,
327 default=None,
328 )
328 )
329 coreconfigitem('devel', 'deprec-warn',
329 coreconfigitem('devel', 'deprec-warn',
330 default=False,
330 default=False,
331 )
331 )
332 coreconfigitem('devel', 'disableloaddefaultcerts',
332 coreconfigitem('devel', 'disableloaddefaultcerts',
333 default=False,
333 default=False,
334 )
334 )
335 coreconfigitem('devel', 'warn-empty-changegroup',
335 coreconfigitem('devel', 'warn-empty-changegroup',
336 default=False,
336 default=False,
337 )
337 )
338 coreconfigitem('devel', 'legacy.exchange',
338 coreconfigitem('devel', 'legacy.exchange',
339 default=list,
339 default=list,
340 )
340 )
341 coreconfigitem('devel', 'servercafile',
341 coreconfigitem('devel', 'servercafile',
342 default='',
342 default='',
343 )
343 )
344 coreconfigitem('devel', 'serverexactprotocol',
344 coreconfigitem('devel', 'serverexactprotocol',
345 default='',
345 default='',
346 )
346 )
347 coreconfigitem('devel', 'serverrequirecert',
347 coreconfigitem('devel', 'serverrequirecert',
348 default=False,
348 default=False,
349 )
349 )
350 coreconfigitem('devel', 'strip-obsmarkers',
350 coreconfigitem('devel', 'strip-obsmarkers',
351 default=True,
351 default=True,
352 )
352 )
353 coreconfigitem('devel', 'warn-config',
353 coreconfigitem('devel', 'warn-config',
354 default=None,
354 default=None,
355 )
355 )
356 coreconfigitem('devel', 'warn-config-default',
356 coreconfigitem('devel', 'warn-config-default',
357 default=None,
357 default=None,
358 )
358 )
359 coreconfigitem('devel', 'user.obsmarker',
359 coreconfigitem('devel', 'user.obsmarker',
360 default=None,
360 default=None,
361 )
361 )
362 coreconfigitem('devel', 'warn-config-unknown',
362 coreconfigitem('devel', 'warn-config-unknown',
363 default=None,
363 default=None,
364 )
364 )
365 coreconfigitem('diff', 'nodates',
365 coreconfigitem('diff', 'nodates',
366 default=False,
366 default=False,
367 )
367 )
368 coreconfigitem('diff', 'showfunc',
368 coreconfigitem('diff', 'showfunc',
369 default=False,
369 default=False,
370 )
370 )
371 coreconfigitem('diff', 'unified',
371 coreconfigitem('diff', 'unified',
372 default=None,
372 default=None,
373 )
373 )
374 coreconfigitem('diff', 'git',
374 coreconfigitem('diff', 'git',
375 default=False,
375 default=False,
376 )
376 )
377 coreconfigitem('diff', 'ignorews',
377 coreconfigitem('diff', 'ignorews',
378 default=False,
378 default=False,
379 )
379 )
380 coreconfigitem('diff', 'ignorewsamount',
380 coreconfigitem('diff', 'ignorewsamount',
381 default=False,
381 default=False,
382 )
382 )
383 coreconfigitem('diff', 'ignoreblanklines',
383 coreconfigitem('diff', 'ignoreblanklines',
384 default=False,
384 default=False,
385 )
385 )
386 coreconfigitem('diff', 'ignorewseol',
386 coreconfigitem('diff', 'ignorewseol',
387 default=False,
387 default=False,
388 )
388 )
389 coreconfigitem('diff', 'nobinary',
389 coreconfigitem('diff', 'nobinary',
390 default=False,
390 default=False,
391 )
391 )
392 coreconfigitem('diff', 'noprefix',
392 coreconfigitem('diff', 'noprefix',
393 default=False,
393 default=False,
394 )
394 )
395 coreconfigitem('email', 'bcc',
395 coreconfigitem('email', 'bcc',
396 default=None,
396 default=None,
397 )
397 )
398 coreconfigitem('email', 'cc',
398 coreconfigitem('email', 'cc',
399 default=None,
399 default=None,
400 )
400 )
401 coreconfigitem('email', 'charsets',
401 coreconfigitem('email', 'charsets',
402 default=list,
402 default=list,
403 )
403 )
404 coreconfigitem('email', 'from',
404 coreconfigitem('email', 'from',
405 default=None,
405 default=None,
406 )
406 )
407 coreconfigitem('email', 'method',
407 coreconfigitem('email', 'method',
408 default='smtp',
408 default='smtp',
409 )
409 )
410 coreconfigitem('email', 'reply-to',
410 coreconfigitem('email', 'reply-to',
411 default=None,
411 default=None,
412 )
412 )
413 coreconfigitem('email', 'to',
413 coreconfigitem('email', 'to',
414 default=None,
414 default=None,
415 )
415 )
416 coreconfigitem('experimental', 'archivemetatemplate',
416 coreconfigitem('experimental', 'archivemetatemplate',
417 default=dynamicdefault,
417 default=dynamicdefault,
418 )
418 )
419 coreconfigitem('experimental', 'bundle-phases',
419 coreconfigitem('experimental', 'bundle-phases',
420 default=False,
420 default=False,
421 )
421 )
422 coreconfigitem('experimental', 'bundle2-advertise',
422 coreconfigitem('experimental', 'bundle2-advertise',
423 default=True,
423 default=True,
424 )
424 )
425 coreconfigitem('experimental', 'bundle2-output-capture',
425 coreconfigitem('experimental', 'bundle2-output-capture',
426 default=False,
426 default=False,
427 )
427 )
428 coreconfigitem('experimental', 'bundle2.pushback',
428 coreconfigitem('experimental', 'bundle2.pushback',
429 default=False,
429 default=False,
430 )
430 )
431 coreconfigitem('experimental', 'bundle2lazylocking',
431 coreconfigitem('experimental', 'bundle2lazylocking',
432 default=False,
432 default=False,
433 )
433 )
434 coreconfigitem('experimental', 'bundlecomplevel',
434 coreconfigitem('experimental', 'bundlecomplevel',
435 default=None,
435 default=None,
436 )
436 )
437 coreconfigitem('experimental', 'changegroup3',
437 coreconfigitem('experimental', 'changegroup3',
438 default=False,
438 default=False,
439 )
439 )
440 coreconfigitem('experimental', 'clientcompressionengines',
440 coreconfigitem('experimental', 'clientcompressionengines',
441 default=list,
441 default=list,
442 )
442 )
443 coreconfigitem('experimental', 'copytrace',
443 coreconfigitem('experimental', 'copytrace',
444 default='on',
444 default='on',
445 )
445 )
446 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
446 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
447 default=100,
447 default=100,
448 )
448 )
449 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
449 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
450 default=100,
450 default=100,
451 )
451 )
452 coreconfigitem('experimental', 'crecordtest',
452 coreconfigitem('experimental', 'crecordtest',
453 default=None,
453 default=None,
454 )
454 )
455 coreconfigitem('experimental', 'editortmpinhg',
455 coreconfigitem('experimental', 'editortmpinhg',
456 default=False,
456 default=False,
457 )
457 )
458 coreconfigitem('experimental', 'evolution',
458 coreconfigitem('experimental', 'evolution',
459 default=list,
459 default=list,
460 )
460 )
461 coreconfigitem('experimental', 'evolution.allowdivergence',
461 coreconfigitem('experimental', 'evolution.allowdivergence',
462 default=False,
462 default=False,
463 alias=[('experimental', 'allowdivergence')]
463 alias=[('experimental', 'allowdivergence')]
464 )
464 )
465 coreconfigitem('experimental', 'evolution.allowunstable',
465 coreconfigitem('experimental', 'evolution.allowunstable',
466 default=None,
466 default=None,
467 )
467 )
468 coreconfigitem('experimental', 'evolution.createmarkers',
468 coreconfigitem('experimental', 'evolution.createmarkers',
469 default=None,
469 default=None,
470 )
470 )
471 coreconfigitem('experimental', 'evolution.effect-flags',
471 coreconfigitem('experimental', 'evolution.effect-flags',
472 default=True,
472 default=True,
473 alias=[('experimental', 'effect-flags')]
473 alias=[('experimental', 'effect-flags')]
474 )
474 )
475 coreconfigitem('experimental', 'evolution.exchange',
475 coreconfigitem('experimental', 'evolution.exchange',
476 default=None,
476 default=None,
477 )
477 )
478 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
478 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
479 default=False,
479 default=False,
480 )
480 )
481 coreconfigitem('experimental', 'evolution.track-operation',
481 coreconfigitem('experimental', 'evolution.track-operation',
482 default=True,
482 default=True,
483 )
483 )
484 coreconfigitem('experimental', 'worddiff',
485 default=False,
486 )
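
These three added lines are the configuration half of the patch named in the commit message: they declare the new experimental.worddiff knob (off by default) that gates within-line color highlighting in diffs, and users opt in by setting it to True in the [experimental] section of their configuration. Conceptually, the feature marks word-level changes between a removed line and the added line that replaces it; a rough standalone sketch of that idea using difflib, not hg's actual implementation:

    import difflib

    old = 'the quick brown fox jumps over the fence'
    new = 'the quick red fox leaps over the fence'

    a, b = old.split(), new.split()
    for op, a1, a2, b1, b2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
        if op != 'equal':
            print('%s %r -> %r' % (op, a[a1:a2], b[b1:b2]))
    # replace ['brown'] -> ['red']
    # replace ['jumps'] -> ['leaps']
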
484 coreconfigitem('experimental', 'maxdeltachainspan',
487 coreconfigitem('experimental', 'maxdeltachainspan',
485 default=-1,
488 default=-1,
486 )
489 )
487 coreconfigitem('experimental', 'mmapindexthreshold',
490 coreconfigitem('experimental', 'mmapindexthreshold',
488 default=None,
491 default=None,
489 )
492 )
490 coreconfigitem('experimental', 'nonnormalparanoidcheck',
493 coreconfigitem('experimental', 'nonnormalparanoidcheck',
491 default=False,
494 default=False,
492 )
495 )
493 coreconfigitem('experimental', 'exportableenviron',
496 coreconfigitem('experimental', 'exportableenviron',
494 default=list,
497 default=list,
495 )
498 )
496 coreconfigitem('experimental', 'extendedheader.index',
499 coreconfigitem('experimental', 'extendedheader.index',
497 default=None,
500 default=None,
498 )
501 )
499 coreconfigitem('experimental', 'extendedheader.similarity',
502 coreconfigitem('experimental', 'extendedheader.similarity',
500 default=False,
503 default=False,
501 )
504 )
502 coreconfigitem('experimental', 'format.compression',
505 coreconfigitem('experimental', 'format.compression',
503 default='zlib',
506 default='zlib',
504 )
507 )
505 coreconfigitem('experimental', 'graphshorten',
508 coreconfigitem('experimental', 'graphshorten',
506 default=False,
509 default=False,
507 )
510 )
508 coreconfigitem('experimental', 'graphstyle.parent',
511 coreconfigitem('experimental', 'graphstyle.parent',
509 default=dynamicdefault,
512 default=dynamicdefault,
510 )
513 )
511 coreconfigitem('experimental', 'graphstyle.missing',
514 coreconfigitem('experimental', 'graphstyle.missing',
512 default=dynamicdefault,
515 default=dynamicdefault,
513 )
516 )
514 coreconfigitem('experimental', 'graphstyle.grandparent',
517 coreconfigitem('experimental', 'graphstyle.grandparent',
515 default=dynamicdefault,
518 default=dynamicdefault,
516 )
519 )
517 coreconfigitem('experimental', 'hook-track-tags',
520 coreconfigitem('experimental', 'hook-track-tags',
518 default=False,
521 default=False,
519 )
522 )
520 coreconfigitem('experimental', 'httppostargs',
523 coreconfigitem('experimental', 'httppostargs',
521 default=False,
524 default=False,
522 )
525 )
523 coreconfigitem('experimental', 'manifestv2',
526 coreconfigitem('experimental', 'manifestv2',
524 default=False,
527 default=False,
525 )
528 )
526 coreconfigitem('experimental', 'mergedriver',
529 coreconfigitem('experimental', 'mergedriver',
527 default=None,
530 default=None,
528 )
531 )
529 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
532 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
530 default=False,
533 default=False,
531 )
534 )
532 coreconfigitem('experimental', 'rebase.multidest',
535 coreconfigitem('experimental', 'rebase.multidest',
533 default=False,
536 default=False,
534 )
537 )
535 coreconfigitem('experimental', 'remotenames',
538 coreconfigitem('experimental', 'remotenames',
536 default=False,
539 default=False,
537 )
540 )
538 coreconfigitem('experimental', 'revlogv2',
541 coreconfigitem('experimental', 'revlogv2',
539 default=None,
542 default=None,
540 )
543 )
541 coreconfigitem('experimental', 'single-head-per-branch',
544 coreconfigitem('experimental', 'single-head-per-branch',
542 default=False,
545 default=False,
543 )
546 )
544 coreconfigitem('experimental', 'spacemovesdown',
547 coreconfigitem('experimental', 'spacemovesdown',
545 default=False,
548 default=False,
546 )
549 )
547 coreconfigitem('experimental', 'sparse-read',
550 coreconfigitem('experimental', 'sparse-read',
548 default=False,
551 default=False,
549 )
552 )
550 coreconfigitem('experimental', 'sparse-read.density-threshold',
553 coreconfigitem('experimental', 'sparse-read.density-threshold',
551 default=0.25,
554 default=0.25,
552 )
555 )
553 coreconfigitem('experimental', 'sparse-read.min-gap-size',
556 coreconfigitem('experimental', 'sparse-read.min-gap-size',
554 default='256K',
557 default='256K',
555 )
558 )
556 coreconfigitem('experimental', 'treemanifest',
559 coreconfigitem('experimental', 'treemanifest',
557 default=False,
560 default=False,
558 )
561 )
559 coreconfigitem('extensions', '.*',
562 coreconfigitem('extensions', '.*',
560 default=None,
563 default=None,
561 generic=True,
564 generic=True,
562 )
565 )
563 coreconfigitem('extdata', '.*',
566 coreconfigitem('extdata', '.*',
564 default=None,
567 default=None,
565 generic=True,
568 generic=True,
566 )
569 )
567 coreconfigitem('format', 'aggressivemergedeltas',
570 coreconfigitem('format', 'aggressivemergedeltas',
568 default=False,
571 default=False,
569 )
572 )
570 coreconfigitem('format', 'chunkcachesize',
573 coreconfigitem('format', 'chunkcachesize',
571 default=None,
574 default=None,
572 )
575 )
573 coreconfigitem('format', 'dotencode',
576 coreconfigitem('format', 'dotencode',
574 default=True,
577 default=True,
575 )
578 )
576 coreconfigitem('format', 'generaldelta',
579 coreconfigitem('format', 'generaldelta',
577 default=False,
580 default=False,
578 )
581 )
579 coreconfigitem('format', 'manifestcachesize',
582 coreconfigitem('format', 'manifestcachesize',
580 default=None,
583 default=None,
581 )
584 )
582 coreconfigitem('format', 'maxchainlen',
585 coreconfigitem('format', 'maxchainlen',
583 default=None,
586 default=None,
584 )
587 )
585 coreconfigitem('format', 'obsstore-version',
588 coreconfigitem('format', 'obsstore-version',
586 default=None,
589 default=None,
587 )
590 )
588 coreconfigitem('format', 'usefncache',
591 coreconfigitem('format', 'usefncache',
589 default=True,
592 default=True,
590 )
593 )
591 coreconfigitem('format', 'usegeneraldelta',
594 coreconfigitem('format', 'usegeneraldelta',
592 default=True,
595 default=True,
593 )
596 )
594 coreconfigitem('format', 'usestore',
597 coreconfigitem('format', 'usestore',
595 default=True,
598 default=True,
596 )
599 )
597 coreconfigitem('fsmonitor', 'warn_when_unused',
600 coreconfigitem('fsmonitor', 'warn_when_unused',
598 default=True,
601 default=True,
599 )
602 )
600 coreconfigitem('fsmonitor', 'warn_update_file_count',
603 coreconfigitem('fsmonitor', 'warn_update_file_count',
601 default=50000,
604 default=50000,
602 )
605 )
603 coreconfigitem('hooks', '.*',
606 coreconfigitem('hooks', '.*',
604 default=dynamicdefault,
607 default=dynamicdefault,
605 generic=True,
608 generic=True,
606 )
609 )
607 coreconfigitem('hgweb-paths', '.*',
610 coreconfigitem('hgweb-paths', '.*',
608 default=list,
611 default=list,
609 generic=True,
612 generic=True,
610 )
613 )
611 coreconfigitem('hostfingerprints', '.*',
614 coreconfigitem('hostfingerprints', '.*',
612 default=list,
615 default=list,
613 generic=True,
616 generic=True,
614 )
617 )
615 coreconfigitem('hostsecurity', 'ciphers',
618 coreconfigitem('hostsecurity', 'ciphers',
616 default=None,
619 default=None,
617 )
620 )
618 coreconfigitem('hostsecurity', 'disabletls10warning',
621 coreconfigitem('hostsecurity', 'disabletls10warning',
619 default=False,
622 default=False,
620 )
623 )
621 coreconfigitem('hostsecurity', 'minimumprotocol',
624 coreconfigitem('hostsecurity', 'minimumprotocol',
622 default=dynamicdefault,
625 default=dynamicdefault,
623 )
626 )
624 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
627 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
625 default=dynamicdefault,
628 default=dynamicdefault,
626 generic=True,
629 generic=True,
627 )
630 )
628 coreconfigitem('hostsecurity', '.*:ciphers$',
631 coreconfigitem('hostsecurity', '.*:ciphers$',
629 default=dynamicdefault,
632 default=dynamicdefault,
630 generic=True,
633 generic=True,
631 )
634 )
632 coreconfigitem('hostsecurity', '.*:fingerprints$',
635 coreconfigitem('hostsecurity', '.*:fingerprints$',
633 default=list,
636 default=list,
634 generic=True,
637 generic=True,
635 )
638 )
636 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
639 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
637 default=None,
640 default=None,
638 generic=True,
641 generic=True,
639 )
642 )
640
643
641 coreconfigitem('http_proxy', 'always',
644 coreconfigitem('http_proxy', 'always',
642 default=False,
645 default=False,
643 )
646 )
644 coreconfigitem('http_proxy', 'host',
647 coreconfigitem('http_proxy', 'host',
645 default=None,
648 default=None,
646 )
649 )
647 coreconfigitem('http_proxy', 'no',
650 coreconfigitem('http_proxy', 'no',
648 default=list,
651 default=list,
649 )
652 )
650 coreconfigitem('http_proxy', 'passwd',
653 coreconfigitem('http_proxy', 'passwd',
651 default=None,
654 default=None,
652 )
655 )
653 coreconfigitem('http_proxy', 'user',
656 coreconfigitem('http_proxy', 'user',
654 default=None,
657 default=None,
655 )
658 )
656 coreconfigitem('logtoprocess', 'commandexception',
659 coreconfigitem('logtoprocess', 'commandexception',
657 default=None,
660 default=None,
658 )
661 )
659 coreconfigitem('logtoprocess', 'commandfinish',
662 coreconfigitem('logtoprocess', 'commandfinish',
660 default=None,
663 default=None,
661 )
664 )
662 coreconfigitem('logtoprocess', 'command',
665 coreconfigitem('logtoprocess', 'command',
663 default=None,
666 default=None,
664 )
667 )
665 coreconfigitem('logtoprocess', 'develwarn',
668 coreconfigitem('logtoprocess', 'develwarn',
666 default=None,
669 default=None,
667 )
670 )
668 coreconfigitem('logtoprocess', 'uiblocked',
671 coreconfigitem('logtoprocess', 'uiblocked',
669 default=None,
672 default=None,
670 )
673 )
671 coreconfigitem('merge', 'checkunknown',
674 coreconfigitem('merge', 'checkunknown',
672 default='abort',
675 default='abort',
673 )
676 )
674 coreconfigitem('merge', 'checkignored',
677 coreconfigitem('merge', 'checkignored',
675 default='abort',
678 default='abort',
676 )
679 )
677 coreconfigitem('experimental', 'merge.checkpathconflicts',
680 coreconfigitem('experimental', 'merge.checkpathconflicts',
678 default=False,
681 default=False,
679 )
682 )
680 coreconfigitem('merge', 'followcopies',
683 coreconfigitem('merge', 'followcopies',
681 default=True,
684 default=True,
682 )
685 )
683 coreconfigitem('merge', 'on-failure',
686 coreconfigitem('merge', 'on-failure',
684 default='continue',
687 default='continue',
685 )
688 )
686 coreconfigitem('merge', 'preferancestor',
689 coreconfigitem('merge', 'preferancestor',
687 default=lambda: ['*'],
690 default=lambda: ['*'],
688 )
691 )
689 coreconfigitem('merge-tools', '.*',
692 coreconfigitem('merge-tools', '.*',
690 default=None,
693 default=None,
691 generic=True,
694 generic=True,
692 )
695 )
693 coreconfigitem('merge-tools', br'.*\.args$',
696 coreconfigitem('merge-tools', br'.*\.args$',
694 default="$local $base $other",
697 default="$local $base $other",
695 generic=True,
698 generic=True,
696 priority=-1,
699 priority=-1,
697 )
700 )
698 coreconfigitem('merge-tools', br'.*\.binary$',
701 coreconfigitem('merge-tools', br'.*\.binary$',
699 default=False,
702 default=False,
700 generic=True,
703 generic=True,
701 priority=-1,
704 priority=-1,
702 )
705 )
703 coreconfigitem('merge-tools', br'.*\.check$',
706 coreconfigitem('merge-tools', br'.*\.check$',
704 default=list,
707 default=list,
705 generic=True,
708 generic=True,
706 priority=-1,
709 priority=-1,
707 )
710 )
708 coreconfigitem('merge-tools', br'.*\.checkchanged$',
711 coreconfigitem('merge-tools', br'.*\.checkchanged$',
709 default=False,
712 default=False,
710 generic=True,
713 generic=True,
711 priority=-1,
714 priority=-1,
712 )
715 )
713 coreconfigitem('merge-tools', br'.*\.executable$',
716 coreconfigitem('merge-tools', br'.*\.executable$',
714 default=dynamicdefault,
717 default=dynamicdefault,
715 generic=True,
718 generic=True,
716 priority=-1,
719 priority=-1,
717 )
720 )
718 coreconfigitem('merge-tools', br'.*\.fixeol$',
721 coreconfigitem('merge-tools', br'.*\.fixeol$',
719 default=False,
722 default=False,
720 generic=True,
723 generic=True,
721 priority=-1,
724 priority=-1,
722 )
725 )
723 coreconfigitem('merge-tools', br'.*\.gui$',
726 coreconfigitem('merge-tools', br'.*\.gui$',
724 default=False,
727 default=False,
725 generic=True,
728 generic=True,
726 priority=-1,
729 priority=-1,
727 )
730 )
728 coreconfigitem('merge-tools', br'.*\.priority$',
731 coreconfigitem('merge-tools', br'.*\.priority$',
729 default=0,
732 default=0,
730 generic=True,
733 generic=True,
731 priority=-1,
734 priority=-1,
732 )
735 )
733 coreconfigitem('merge-tools', br'.*\.premerge$',
736 coreconfigitem('merge-tools', br'.*\.premerge$',
734 default=dynamicdefault,
737 default=dynamicdefault,
735 generic=True,
738 generic=True,
736 priority=-1,
739 priority=-1,
737 )
740 )
738 coreconfigitem('merge-tools', br'.*\.symlink$',
741 coreconfigitem('merge-tools', br'.*\.symlink$',
739 default=False,
742 default=False,
740 generic=True,
743 generic=True,
741 priority=-1,
744 priority=-1,
742 )
745 )
743 coreconfigitem('pager', 'attend-.*',
746 coreconfigitem('pager', 'attend-.*',
744 default=dynamicdefault,
747 default=dynamicdefault,
745 generic=True,
748 generic=True,
746 )
749 )
747 coreconfigitem('pager', 'ignore',
750 coreconfigitem('pager', 'ignore',
748 default=list,
751 default=list,
749 )
752 )
750 coreconfigitem('pager', 'pager',
753 coreconfigitem('pager', 'pager',
751 default=dynamicdefault,
754 default=dynamicdefault,
752 )
755 )
753 coreconfigitem('patch', 'eol',
756 coreconfigitem('patch', 'eol',
754 default='strict',
757 default='strict',
755 )
758 )
756 coreconfigitem('patch', 'fuzz',
759 coreconfigitem('patch', 'fuzz',
757 default=2,
760 default=2,
758 )
761 )
759 coreconfigitem('paths', 'default',
762 coreconfigitem('paths', 'default',
760 default=None,
763 default=None,
761 )
764 )
762 coreconfigitem('paths', 'default-push',
765 coreconfigitem('paths', 'default-push',
763 default=None,
766 default=None,
764 )
767 )
765 coreconfigitem('paths', '.*',
768 coreconfigitem('paths', '.*',
766 default=None,
769 default=None,
767 generic=True,
770 generic=True,
768 )
771 )
769 coreconfigitem('phases', 'checksubrepos',
772 coreconfigitem('phases', 'checksubrepos',
770 default='follow',
773 default='follow',
771 )
774 )
772 coreconfigitem('phases', 'new-commit',
775 coreconfigitem('phases', 'new-commit',
773 default='draft',
776 default='draft',
774 )
777 )
775 coreconfigitem('phases', 'publish',
778 coreconfigitem('phases', 'publish',
776 default=True,
779 default=True,
777 )
780 )
778 coreconfigitem('profiling', 'enabled',
781 coreconfigitem('profiling', 'enabled',
779 default=False,
782 default=False,
780 )
783 )
781 coreconfigitem('profiling', 'format',
784 coreconfigitem('profiling', 'format',
782 default='text',
785 default='text',
783 )
786 )
784 coreconfigitem('profiling', 'freq',
787 coreconfigitem('profiling', 'freq',
785 default=1000,
788 default=1000,
786 )
789 )
787 coreconfigitem('profiling', 'limit',
790 coreconfigitem('profiling', 'limit',
788 default=30,
791 default=30,
789 )
792 )
790 coreconfigitem('profiling', 'nested',
793 coreconfigitem('profiling', 'nested',
791 default=0,
794 default=0,
792 )
795 )
793 coreconfigitem('profiling', 'output',
796 coreconfigitem('profiling', 'output',
794 default=None,
797 default=None,
795 )
798 )
796 coreconfigitem('profiling', 'showmax',
799 coreconfigitem('profiling', 'showmax',
797 default=0.999,
800 default=0.999,
798 )
801 )
799 coreconfigitem('profiling', 'showmin',
802 coreconfigitem('profiling', 'showmin',
800 default=dynamicdefault,
803 default=dynamicdefault,
801 )
804 )
802 coreconfigitem('profiling', 'sort',
805 coreconfigitem('profiling', 'sort',
803 default='inlinetime',
806 default='inlinetime',
804 )
807 )
805 coreconfigitem('profiling', 'statformat',
808 coreconfigitem('profiling', 'statformat',
806 default='hotpath',
809 default='hotpath',
807 )
810 )
808 coreconfigitem('profiling', 'type',
811 coreconfigitem('profiling', 'type',
809 default='stat',
812 default='stat',
810 )
813 )
811 coreconfigitem('progress', 'assume-tty',
814 coreconfigitem('progress', 'assume-tty',
812 default=False,
815 default=False,
813 )
816 )
814 coreconfigitem('progress', 'changedelay',
817 coreconfigitem('progress', 'changedelay',
815 default=1,
818 default=1,
816 )
819 )
817 coreconfigitem('progress', 'clear-complete',
820 coreconfigitem('progress', 'clear-complete',
818 default=True,
821 default=True,
819 )
822 )
820 coreconfigitem('progress', 'debug',
823 coreconfigitem('progress', 'debug',
821 default=False,
824 default=False,
822 )
825 )
823 coreconfigitem('progress', 'delay',
826 coreconfigitem('progress', 'delay',
824 default=3,
827 default=3,
825 )
828 )
826 coreconfigitem('progress', 'disable',
829 coreconfigitem('progress', 'disable',
827 default=False,
830 default=False,
828 )
831 )
829 coreconfigitem('progress', 'estimateinterval',
832 coreconfigitem('progress', 'estimateinterval',
830 default=60.0,
833 default=60.0,
831 )
834 )
832 coreconfigitem('progress', 'format',
835 coreconfigitem('progress', 'format',
833 default=lambda: ['topic', 'bar', 'number', 'estimate'],
836 default=lambda: ['topic', 'bar', 'number', 'estimate'],
834 )
837 )
835 coreconfigitem('progress', 'refresh',
838 coreconfigitem('progress', 'refresh',
836 default=0.1,
839 default=0.1,
837 )
840 )
838 coreconfigitem('progress', 'width',
841 coreconfigitem('progress', 'width',
839 default=dynamicdefault,
842 default=dynamicdefault,
840 )
843 )
841 coreconfigitem('push', 'pushvars.server',
844 coreconfigitem('push', 'pushvars.server',
842 default=False,
845 default=False,
843 )
846 )
844 coreconfigitem('server', 'bookmarks-pushkey-compat',
847 coreconfigitem('server', 'bookmarks-pushkey-compat',
845 default=True,
848 default=True,
846 )
849 )
847 coreconfigitem('server', 'bundle1',
850 coreconfigitem('server', 'bundle1',
848 default=True,
851 default=True,
849 )
852 )
850 coreconfigitem('server', 'bundle1gd',
853 coreconfigitem('server', 'bundle1gd',
851 default=None,
854 default=None,
852 )
855 )
853 coreconfigitem('server', 'bundle1.pull',
856 coreconfigitem('server', 'bundle1.pull',
854 default=None,
857 default=None,
855 )
858 )
856 coreconfigitem('server', 'bundle1gd.pull',
859 coreconfigitem('server', 'bundle1gd.pull',
857 default=None,
860 default=None,
858 )
861 )
859 coreconfigitem('server', 'bundle1.push',
862 coreconfigitem('server', 'bundle1.push',
860 default=None,
863 default=None,
861 )
864 )
862 coreconfigitem('server', 'bundle1gd.push',
865 coreconfigitem('server', 'bundle1gd.push',
863 default=None,
866 default=None,
864 )
867 )
865 coreconfigitem('server', 'compressionengines',
868 coreconfigitem('server', 'compressionengines',
866 default=list,
869 default=list,
867 )
870 )
868 coreconfigitem('server', 'concurrent-push-mode',
871 coreconfigitem('server', 'concurrent-push-mode',
869 default='strict',
872 default='strict',
870 )
873 )
871 coreconfigitem('server', 'disablefullbundle',
874 coreconfigitem('server', 'disablefullbundle',
872 default=False,
875 default=False,
873 )
876 )
874 coreconfigitem('server', 'maxhttpheaderlen',
877 coreconfigitem('server', 'maxhttpheaderlen',
875 default=1024,
878 default=1024,
876 )
879 )
877 coreconfigitem('server', 'preferuncompressed',
880 coreconfigitem('server', 'preferuncompressed',
878 default=False,
881 default=False,
879 )
882 )
880 coreconfigitem('server', 'uncompressed',
883 coreconfigitem('server', 'uncompressed',
881 default=True,
884 default=True,
882 )
885 )
883 coreconfigitem('server', 'uncompressedallowsecret',
886 coreconfigitem('server', 'uncompressedallowsecret',
884 default=False,
887 default=False,
885 )
888 )
886 coreconfigitem('server', 'validate',
889 coreconfigitem('server', 'validate',
887 default=False,
890 default=False,
888 )
891 )
889 coreconfigitem('server', 'zliblevel',
892 coreconfigitem('server', 'zliblevel',
890 default=-1,
893 default=-1,
891 )
894 )
892 coreconfigitem('share', 'pool',
895 coreconfigitem('share', 'pool',
893 default=None,
896 default=None,
894 )
897 )
895 coreconfigitem('share', 'poolnaming',
898 coreconfigitem('share', 'poolnaming',
896 default='identity',
899 default='identity',
897 )
900 )
898 coreconfigitem('smtp', 'host',
901 coreconfigitem('smtp', 'host',
899 default=None,
902 default=None,
900 )
903 )
901 coreconfigitem('smtp', 'local_hostname',
904 coreconfigitem('smtp', 'local_hostname',
902 default=None,
905 default=None,
903 )
906 )
904 coreconfigitem('smtp', 'password',
907 coreconfigitem('smtp', 'password',
905 default=None,
908 default=None,
906 )
909 )
907 coreconfigitem('smtp', 'port',
910 coreconfigitem('smtp', 'port',
908 default=dynamicdefault,
911 default=dynamicdefault,
909 )
912 )
910 coreconfigitem('smtp', 'tls',
913 coreconfigitem('smtp', 'tls',
911 default='none',
914 default='none',
912 )
915 )
913 coreconfigitem('smtp', 'username',
916 coreconfigitem('smtp', 'username',
914 default=None,
917 default=None,
915 )
918 )
916 coreconfigitem('sparse', 'missingwarning',
919 coreconfigitem('sparse', 'missingwarning',
917 default=True,
920 default=True,
918 )
921 )
919 coreconfigitem('subrepos', 'allowed',
922 coreconfigitem('subrepos', 'allowed',
920 default=dynamicdefault, # to make backporting simpler
923 default=dynamicdefault, # to make backporting simpler
921 )
924 )
922 coreconfigitem('subrepos', 'hg:allowed',
925 coreconfigitem('subrepos', 'hg:allowed',
923 default=dynamicdefault,
926 default=dynamicdefault,
924 )
927 )
925 coreconfigitem('subrepos', 'git:allowed',
928 coreconfigitem('subrepos', 'git:allowed',
926 default=dynamicdefault,
929 default=dynamicdefault,
927 )
930 )
928 coreconfigitem('subrepos', 'svn:allowed',
931 coreconfigitem('subrepos', 'svn:allowed',
929 default=dynamicdefault,
932 default=dynamicdefault,
930 )
933 )
931 coreconfigitem('templates', '.*',
934 coreconfigitem('templates', '.*',
932 default=None,
935 default=None,
933 generic=True,
936 generic=True,
934 )
937 )
935 coreconfigitem('trusted', 'groups',
938 coreconfigitem('trusted', 'groups',
936 default=list,
939 default=list,
937 )
940 )
938 coreconfigitem('trusted', 'users',
941 coreconfigitem('trusted', 'users',
939 default=list,
942 default=list,
940 )
943 )
941 coreconfigitem('ui', '_usedassubrepo',
944 coreconfigitem('ui', '_usedassubrepo',
942 default=False,
945 default=False,
943 )
946 )
944 coreconfigitem('ui', 'allowemptycommit',
947 coreconfigitem('ui', 'allowemptycommit',
945 default=False,
948 default=False,
946 )
949 )
947 coreconfigitem('ui', 'archivemeta',
950 coreconfigitem('ui', 'archivemeta',
948 default=True,
951 default=True,
949 )
952 )
950 coreconfigitem('ui', 'askusername',
953 coreconfigitem('ui', 'askusername',
951 default=False,
954 default=False,
952 )
955 )
953 coreconfigitem('ui', 'clonebundlefallback',
956 coreconfigitem('ui', 'clonebundlefallback',
954 default=False,
957 default=False,
955 )
958 )
956 coreconfigitem('ui', 'clonebundleprefers',
959 coreconfigitem('ui', 'clonebundleprefers',
957 default=list,
960 default=list,
958 )
961 )
959 coreconfigitem('ui', 'clonebundles',
962 coreconfigitem('ui', 'clonebundles',
960 default=True,
963 default=True,
961 )
964 )
962 coreconfigitem('ui', 'color',
965 coreconfigitem('ui', 'color',
963 default='auto',
966 default='auto',
964 )
967 )
965 coreconfigitem('ui', 'commitsubrepos',
968 coreconfigitem('ui', 'commitsubrepos',
966 default=False,
969 default=False,
967 )
970 )
968 coreconfigitem('ui', 'debug',
971 coreconfigitem('ui', 'debug',
969 default=False,
972 default=False,
970 )
973 )
971 coreconfigitem('ui', 'debugger',
974 coreconfigitem('ui', 'debugger',
972 default=None,
975 default=None,
973 )
976 )
974 coreconfigitem('ui', 'editor',
977 coreconfigitem('ui', 'editor',
975 default=dynamicdefault,
978 default=dynamicdefault,
976 )
979 )
977 coreconfigitem('ui', 'fallbackencoding',
980 coreconfigitem('ui', 'fallbackencoding',
978 default=None,
981 default=None,
979 )
982 )
980 coreconfigitem('ui', 'forcecwd',
983 coreconfigitem('ui', 'forcecwd',
981 default=None,
984 default=None,
982 )
985 )
983 coreconfigitem('ui', 'forcemerge',
986 coreconfigitem('ui', 'forcemerge',
984 default=None,
987 default=None,
985 )
988 )
986 coreconfigitem('ui', 'formatdebug',
989 coreconfigitem('ui', 'formatdebug',
987 default=False,
990 default=False,
988 )
991 )
989 coreconfigitem('ui', 'formatjson',
992 coreconfigitem('ui', 'formatjson',
990 default=False,
993 default=False,
991 )
994 )
992 coreconfigitem('ui', 'formatted',
995 coreconfigitem('ui', 'formatted',
993 default=None,
996 default=None,
994 )
997 )
995 coreconfigitem('ui', 'graphnodetemplate',
998 coreconfigitem('ui', 'graphnodetemplate',
996 default=None,
999 default=None,
997 )
1000 )
998 coreconfigitem('ui', 'http2debuglevel',
1001 coreconfigitem('ui', 'http2debuglevel',
999 default=None,
1002 default=None,
1000 )
1003 )
1001 coreconfigitem('ui', 'interactive',
1004 coreconfigitem('ui', 'interactive',
1002 default=None,
1005 default=None,
1003 )
1006 )
1004 coreconfigitem('ui', 'interface',
1007 coreconfigitem('ui', 'interface',
1005 default=None,
1008 default=None,
1006 )
1009 )
1007 coreconfigitem('ui', 'interface.chunkselector',
1010 coreconfigitem('ui', 'interface.chunkselector',
1008 default=None,
1011 default=None,
1009 )
1012 )
1010 coreconfigitem('ui', 'logblockedtimes',
1013 coreconfigitem('ui', 'logblockedtimes',
1011 default=False,
1014 default=False,
1012 )
1015 )
1013 coreconfigitem('ui', 'logtemplate',
1016 coreconfigitem('ui', 'logtemplate',
1014 default=None,
1017 default=None,
1015 )
1018 )
1016 coreconfigitem('ui', 'merge',
1019 coreconfigitem('ui', 'merge',
1017 default=None,
1020 default=None,
1018 )
1021 )
1019 coreconfigitem('ui', 'mergemarkers',
1022 coreconfigitem('ui', 'mergemarkers',
1020 default='basic',
1023 default='basic',
1021 )
1024 )
1022 coreconfigitem('ui', 'mergemarkertemplate',
1025 coreconfigitem('ui', 'mergemarkertemplate',
1023 default=('{node|short} '
1026 default=('{node|short} '
1024 '{ifeq(tags, "tip", "", '
1027 '{ifeq(tags, "tip", "", '
1025 'ifeq(tags, "", "", "{tags} "))}'
1028 'ifeq(tags, "", "", "{tags} "))}'
1026 '{if(bookmarks, "{bookmarks} ")}'
1029 '{if(bookmarks, "{bookmarks} ")}'
1027 '{ifeq(branch, "default", "", "{branch} ")}'
1030 '{ifeq(branch, "default", "", "{branch} ")}'
1028 '- {author|user}: {desc|firstline}')
1031 '- {author|user}: {desc|firstline}')
1029 )
1032 )
1030 coreconfigitem('ui', 'nontty',
1033 coreconfigitem('ui', 'nontty',
1031 default=False,
1034 default=False,
1032 )
1035 )
1033 coreconfigitem('ui', 'origbackuppath',
1036 coreconfigitem('ui', 'origbackuppath',
1034 default=None,
1037 default=None,
1035 )
1038 )
1036 coreconfigitem('ui', 'paginate',
1039 coreconfigitem('ui', 'paginate',
1037 default=True,
1040 default=True,
1038 )
1041 )
1039 coreconfigitem('ui', 'patch',
1042 coreconfigitem('ui', 'patch',
1040 default=None,
1043 default=None,
1041 )
1044 )
1042 coreconfigitem('ui', 'portablefilenames',
1045 coreconfigitem('ui', 'portablefilenames',
1043 default='warn',
1046 default='warn',
1044 )
1047 )
1045 coreconfigitem('ui', 'promptecho',
1048 coreconfigitem('ui', 'promptecho',
1046 default=False,
1049 default=False,
1047 )
1050 )
1048 coreconfigitem('ui', 'quiet',
1051 coreconfigitem('ui', 'quiet',
1049 default=False,
1052 default=False,
1050 )
1053 )
1051 coreconfigitem('ui', 'quietbookmarkmove',
1054 coreconfigitem('ui', 'quietbookmarkmove',
1052 default=False,
1055 default=False,
1053 )
1056 )
1054 coreconfigitem('ui', 'remotecmd',
1057 coreconfigitem('ui', 'remotecmd',
1055 default='hg',
1058 default='hg',
1056 )
1059 )
1057 coreconfigitem('ui', 'report_untrusted',
1060 coreconfigitem('ui', 'report_untrusted',
1058 default=True,
1061 default=True,
1059 )
1062 )
1060 coreconfigitem('ui', 'rollback',
1063 coreconfigitem('ui', 'rollback',
1061 default=True,
1064 default=True,
1062 )
1065 )
1063 coreconfigitem('ui', 'slash',
1066 coreconfigitem('ui', 'slash',
1064 default=False,
1067 default=False,
1065 )
1068 )
1066 coreconfigitem('ui', 'ssh',
1069 coreconfigitem('ui', 'ssh',
1067 default='ssh',
1070 default='ssh',
1068 )
1071 )
1069 coreconfigitem('ui', 'ssherrorhint',
1072 coreconfigitem('ui', 'ssherrorhint',
1070 default=None,
1073 default=None,
1071 )
1074 )
1072 coreconfigitem('ui', 'statuscopies',
1075 coreconfigitem('ui', 'statuscopies',
1073 default=False,
1076 default=False,
1074 )
1077 )
1075 coreconfigitem('ui', 'strict',
1078 coreconfigitem('ui', 'strict',
1076 default=False,
1079 default=False,
1077 )
1080 )
1078 coreconfigitem('ui', 'style',
1081 coreconfigitem('ui', 'style',
1079 default='',
1082 default='',
1080 )
1083 )
1081 coreconfigitem('ui', 'supportcontact',
1084 coreconfigitem('ui', 'supportcontact',
1082 default=None,
1085 default=None,
1083 )
1086 )
1084 coreconfigitem('ui', 'textwidth',
1087 coreconfigitem('ui', 'textwidth',
1085 default=78,
1088 default=78,
1086 )
1089 )
1087 coreconfigitem('ui', 'timeout',
1090 coreconfigitem('ui', 'timeout',
1088 default='600',
1091 default='600',
1089 )
1092 )
1090 coreconfigitem('ui', 'timeout.warn',
1093 coreconfigitem('ui', 'timeout.warn',
1091 default=0,
1094 default=0,
1092 )
1095 )
1093 coreconfigitem('ui', 'traceback',
1096 coreconfigitem('ui', 'traceback',
1094 default=False,
1097 default=False,
1095 )
1098 )
1096 coreconfigitem('ui', 'tweakdefaults',
1099 coreconfigitem('ui', 'tweakdefaults',
1097 default=False,
1100 default=False,
1098 )
1101 )
1099 coreconfigitem('ui', 'usehttp2',
1102 coreconfigitem('ui', 'usehttp2',
1100 default=False,
1103 default=False,
1101 )
1104 )
1102 coreconfigitem('ui', 'username',
1105 coreconfigitem('ui', 'username',
1103 alias=[('ui', 'user')]
1106 alias=[('ui', 'user')]
1104 )
1107 )
1105 coreconfigitem('ui', 'verbose',
1108 coreconfigitem('ui', 'verbose',
1106 default=False,
1109 default=False,
1107 )
1110 )
1108 coreconfigitem('verify', 'skipflags',
1111 coreconfigitem('verify', 'skipflags',
1109 default=None,
1112 default=None,
1110 )
1113 )
1111 coreconfigitem('web', 'allowbz2',
1114 coreconfigitem('web', 'allowbz2',
1112 default=False,
1115 default=False,
1113 )
1116 )
1114 coreconfigitem('web', 'allowgz',
1117 coreconfigitem('web', 'allowgz',
1115 default=False,
1118 default=False,
1116 )
1119 )
1117 coreconfigitem('web', 'allow-pull',
1120 coreconfigitem('web', 'allow-pull',
1118 alias=[('web', 'allowpull')],
1121 alias=[('web', 'allowpull')],
1119 default=True,
1122 default=True,
1120 )
1123 )
1121 coreconfigitem('web', 'allow-push',
1124 coreconfigitem('web', 'allow-push',
1122 alias=[('web', 'allow_push')],
1125 alias=[('web', 'allow_push')],
1123 default=list,
1126 default=list,
1124 )
1127 )
1125 coreconfigitem('web', 'allowzip',
1128 coreconfigitem('web', 'allowzip',
1126 default=False,
1129 default=False,
1127 )
1130 )
1128 coreconfigitem('web', 'archivesubrepos',
1131 coreconfigitem('web', 'archivesubrepos',
1129 default=False,
1132 default=False,
1130 )
1133 )
1131 coreconfigitem('web', 'cache',
1134 coreconfigitem('web', 'cache',
1132 default=True,
1135 default=True,
1133 )
1136 )
1134 coreconfigitem('web', 'contact',
1137 coreconfigitem('web', 'contact',
1135 default=None,
1138 default=None,
1136 )
1139 )
1137 coreconfigitem('web', 'deny_push',
1140 coreconfigitem('web', 'deny_push',
1138 default=list,
1141 default=list,
1139 )
1142 )
1140 coreconfigitem('web', 'guessmime',
1143 coreconfigitem('web', 'guessmime',
1141 default=False,
1144 default=False,
1142 )
1145 )
1143 coreconfigitem('web', 'hidden',
1146 coreconfigitem('web', 'hidden',
1144 default=False,
1147 default=False,
1145 )
1148 )
1146 coreconfigitem('web', 'labels',
1149 coreconfigitem('web', 'labels',
1147 default=list,
1150 default=list,
1148 )
1151 )
1149 coreconfigitem('web', 'logoimg',
1152 coreconfigitem('web', 'logoimg',
1150 default='hglogo.png',
1153 default='hglogo.png',
1151 )
1154 )
1152 coreconfigitem('web', 'logourl',
1155 coreconfigitem('web', 'logourl',
1153 default='https://mercurial-scm.org/',
1156 default='https://mercurial-scm.org/',
1154 )
1157 )
1155 coreconfigitem('web', 'accesslog',
1158 coreconfigitem('web', 'accesslog',
1156 default='-',
1159 default='-',
1157 )
1160 )
1158 coreconfigitem('web', 'address',
1161 coreconfigitem('web', 'address',
1159 default='',
1162 default='',
1160 )
1163 )
1161 coreconfigitem('web', 'allow_archive',
1164 coreconfigitem('web', 'allow_archive',
1162 default=list,
1165 default=list,
1163 )
1166 )
1164 coreconfigitem('web', 'allow_read',
1167 coreconfigitem('web', 'allow_read',
1165 default=list,
1168 default=list,
1166 )
1169 )
1167 coreconfigitem('web', 'baseurl',
1170 coreconfigitem('web', 'baseurl',
1168 default=None,
1171 default=None,
1169 )
1172 )
1170 coreconfigitem('web', 'cacerts',
1173 coreconfigitem('web', 'cacerts',
1171 default=None,
1174 default=None,
1172 )
1175 )
1173 coreconfigitem('web', 'certificate',
1176 coreconfigitem('web', 'certificate',
1174 default=None,
1177 default=None,
1175 )
1178 )
1176 coreconfigitem('web', 'collapse',
1179 coreconfigitem('web', 'collapse',
1177 default=False,
1180 default=False,
1178 )
1181 )
1179 coreconfigitem('web', 'csp',
1182 coreconfigitem('web', 'csp',
1180 default=None,
1183 default=None,
1181 )
1184 )
1182 coreconfigitem('web', 'deny_read',
1185 coreconfigitem('web', 'deny_read',
1183 default=list,
1186 default=list,
1184 )
1187 )
1185 coreconfigitem('web', 'descend',
1188 coreconfigitem('web', 'descend',
1186 default=True,
1189 default=True,
1187 )
1190 )
1188 coreconfigitem('web', 'description',
1191 coreconfigitem('web', 'description',
1189 default="",
1192 default="",
1190 )
1193 )
1191 coreconfigitem('web', 'encoding',
1194 coreconfigitem('web', 'encoding',
1192 default=lambda: encoding.encoding,
1195 default=lambda: encoding.encoding,
1193 )
1196 )
1194 coreconfigitem('web', 'errorlog',
1197 coreconfigitem('web', 'errorlog',
1195 default='-',
1198 default='-',
1196 )
1199 )
1197 coreconfigitem('web', 'ipv6',
1200 coreconfigitem('web', 'ipv6',
1198 default=False,
1201 default=False,
1199 )
1202 )
1200 coreconfigitem('web', 'maxchanges',
1203 coreconfigitem('web', 'maxchanges',
1201 default=10,
1204 default=10,
1202 )
1205 )
1203 coreconfigitem('web', 'maxfiles',
1206 coreconfigitem('web', 'maxfiles',
1204 default=10,
1207 default=10,
1205 )
1208 )
1206 coreconfigitem('web', 'maxshortchanges',
1209 coreconfigitem('web', 'maxshortchanges',
1207 default=60,
1210 default=60,
1208 )
1211 )
1209 coreconfigitem('web', 'motd',
1212 coreconfigitem('web', 'motd',
1210 default='',
1213 default='',
1211 )
1214 )
1212 coreconfigitem('web', 'name',
1215 coreconfigitem('web', 'name',
1213 default=dynamicdefault,
1216 default=dynamicdefault,
1214 )
1217 )
1215 coreconfigitem('web', 'port',
1218 coreconfigitem('web', 'port',
1216 default=8000,
1219 default=8000,
1217 )
1220 )
1218 coreconfigitem('web', 'prefix',
1221 coreconfigitem('web', 'prefix',
1219 default='',
1222 default='',
1220 )
1223 )
1221 coreconfigitem('web', 'push_ssl',
1224 coreconfigitem('web', 'push_ssl',
1222 default=True,
1225 default=True,
1223 )
1226 )
1224 coreconfigitem('web', 'refreshinterval',
1227 coreconfigitem('web', 'refreshinterval',
1225 default=20,
1228 default=20,
1226 )
1229 )
1227 coreconfigitem('web', 'staticurl',
1230 coreconfigitem('web', 'staticurl',
1228 default=None,
1231 default=None,
1229 )
1232 )
1230 coreconfigitem('web', 'stripes',
1233 coreconfigitem('web', 'stripes',
1231 default=1,
1234 default=1,
1232 )
1235 )
1233 coreconfigitem('web', 'style',
1236 coreconfigitem('web', 'style',
1234 default='paper',
1237 default='paper',
1235 )
1238 )
1236 coreconfigitem('web', 'templates',
1239 coreconfigitem('web', 'templates',
1237 default=None,
1240 default=None,
1238 )
1241 )
1239 coreconfigitem('web', 'view',
1242 coreconfigitem('web', 'view',
1240 default='served',
1243 default='served',
1241 )
1244 )
1242 coreconfigitem('worker', 'backgroundclose',
1245 coreconfigitem('worker', 'backgroundclose',
1243 default=dynamicdefault,
1246 default=dynamicdefault,
1244 )
1247 )
1245 # Windows defaults to a limit of 512 open files. A buffer of 128
1248 # Windows defaults to a limit of 512 open files. A buffer of 128
1246 # should give us enough headway.
1249 # should give us enough headway.
1247 coreconfigitem('worker', 'backgroundclosemaxqueue',
1250 coreconfigitem('worker', 'backgroundclosemaxqueue',
1248 default=384,
1251 default=384,
1249 )
1252 )
1250 coreconfigitem('worker', 'backgroundcloseminfilecount',
1253 coreconfigitem('worker', 'backgroundcloseminfilecount',
1251 default=2048,
1254 default=2048,
1252 )
1255 )
1253 coreconfigitem('worker', 'backgroundclosethreadcount',
1256 coreconfigitem('worker', 'backgroundclosethreadcount',
1254 default=4,
1257 default=4,
1255 )
1258 )
1256 coreconfigitem('worker', 'numcpus',
1259 coreconfigitem('worker', 'numcpus',
1257 default=None,
1260 default=None,
1258 )
1261 )
1259
1262
1260 # Rebase-related configuration moved to core because other extensions are doing
1263 # Rebase-related configuration moved to core because other extensions are doing
1261 # strange things. For example, shelve imports the extension to reuse some bits
1264 # strange things. For example, shelve imports the extension to reuse some bits
1262 # without formally loading it.
1265 # without formally loading it.
1263 coreconfigitem('commands', 'rebase.requiredest',
1266 coreconfigitem('commands', 'rebase.requiredest',
1264 default=False,
1267 default=False,
1265 )
1268 )
1266 coreconfigitem('experimental', 'rebaseskipobsolete',
1269 coreconfigitem('experimental', 'rebaseskipobsolete',
1267 default=True,
1270 default=True,
1268 )
1271 )
1269 coreconfigitem('rebase', 'singletransaction',
1272 coreconfigitem('rebase', 'singletransaction',
1270 default=False,
1273 default=False,
1271 )
1274 )
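
The registrations above give every core setting a declared default; a callable default (for example list or lambda: encoding.encoding) is invoked at lookup time so each caller gets a fresh value. Below is a minimal, self-contained sketch of that register-then-look-up pattern; all names are illustrative stand-ins, not the actual configitems/ui code:

    # hypothetical stand-in for the registration pattern shown above
    _registry = {}

    def registeritem(section, name, default=None):
        _registry[(section, name)] = default

    def configlookup(userconf, section, name):
        # an explicit user setting wins; otherwise fall back to the
        # registered default, calling it when it is a factory like list
        if (section, name) in userconf:
            return userconf[(section, name)]
        default = _registry.get((section, name))
        return default() if callable(default) else default

    registeritem('web', 'port', default=8000)
    registeritem('web', 'allow-push', default=list)
    print(configlookup({}, 'web', 'port'))        # -> 8000
    print(configlookup({}, 'web', 'allow-push'))  # -> []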
@@ -1,491 +1,492 b''
1 # mdiff.py - diff and patch routines for mercurial
1 # mdiff.py - diff and patch routines for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import struct
11 import struct
12 import zlib
12 import zlib
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 error,
16 error,
17 policy,
17 policy,
18 pycompat,
18 pycompat,
19 util,
19 util,
20 )
20 )
21
21
22 bdiff = policy.importmod(r'bdiff')
22 bdiff = policy.importmod(r'bdiff')
23 mpatch = policy.importmod(r'mpatch')
23 mpatch = policy.importmod(r'mpatch')
24
24
25 blocks = bdiff.blocks
25 blocks = bdiff.blocks
26 fixws = bdiff.fixws
26 fixws = bdiff.fixws
27 patches = mpatch.patches
27 patches = mpatch.patches
28 patchedsize = mpatch.patchedsize
28 patchedsize = mpatch.patchedsize
29 textdiff = bdiff.bdiff
29 textdiff = bdiff.bdiff
30
30
31 def splitnewlines(text):
31 def splitnewlines(text):
32 '''like str.splitlines, but only split on newlines.'''
32 '''like str.splitlines, but only split on newlines.'''
33 lines = [l + '\n' for l in text.split('\n')]
33 lines = [l + '\n' for l in text.split('\n')]
34 if lines:
34 if lines:
35 if lines[-1] == '\n':
35 if lines[-1] == '\n':
36 lines.pop()
36 lines.pop()
37 else:
37 else:
38 lines[-1] = lines[-1][:-1]
38 lines[-1] = lines[-1][:-1]
39 return lines
39 return lines
40
40
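# Editorial aside, not part of mdiff.py: a self-contained restatement of
# splitnewlines()'s contract above. Unlike str.splitlines(), it splits only
# on '\n' and keeps the newline on every element except a final
# unterminated fragment.
def _splitnewlines_demo(text):
    lines = [l + '\n' for l in text.split('\n')]
    if lines:
        if lines[-1] == '\n':
            lines.pop()
        else:
            lines[-1] = lines[-1][:-1]
    return lines

assert _splitnewlines_demo('a\nb\n') == ['a\n', 'b\n']
assert _splitnewlines_demo('a\nb') == ['a\n', 'b']
assert _splitnewlines_demo('') == []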
41 class diffopts(object):
41 class diffopts(object):
42 '''context is the number of context lines
42 '''context is the number of context lines
43 text treats all files as text
43 text treats all files as text
44 showfunc enables diff -p output
44 showfunc enables diff -p output
45 git enables the git extended patch format
45 git enables the git extended patch format
46 nodates removes dates from diff headers
46 nodates removes dates from diff headers
47 nobinary ignores binary files
47 nobinary ignores binary files
48 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
48 noprefix disables the 'a/' and 'b/' prefixes (ignored in plain mode)
49 ignorews ignores all whitespace changes in the diff
49 ignorews ignores all whitespace changes in the diff
50 ignorewsamount ignores changes in the amount of whitespace
50 ignorewsamount ignores changes in the amount of whitespace
51 ignoreblanklines ignores changes whose lines are all blank
51 ignoreblanklines ignores changes whose lines are all blank
52 upgrade generates git diffs to avoid data loss
52 upgrade generates git diffs to avoid data loss
53 '''
53 '''
54
54
55 defaults = {
55 defaults = {
56 'context': 3,
56 'context': 3,
57 'text': False,
57 'text': False,
58 'showfunc': False,
58 'showfunc': False,
59 'git': False,
59 'git': False,
60 'nodates': False,
60 'nodates': False,
61 'nobinary': False,
61 'nobinary': False,
62 'noprefix': False,
62 'noprefix': False,
63 'index': 0,
63 'index': 0,
64 'ignorews': False,
64 'ignorews': False,
65 'ignorewsamount': False,
65 'ignorewsamount': False,
66 'ignorewseol': False,
66 'ignorewseol': False,
67 'ignoreblanklines': False,
67 'ignoreblanklines': False,
68 'upgrade': False,
68 'upgrade': False,
69 'showsimilarity': False,
69 'showsimilarity': False,
70 'worddiff': False,
70 }
71 }
71
72
72 def __init__(self, **opts):
73 def __init__(self, **opts):
73 opts = pycompat.byteskwargs(opts)
74 opts = pycompat.byteskwargs(opts)
74 for k in self.defaults.keys():
75 for k in self.defaults.keys():
75 v = opts.get(k)
76 v = opts.get(k)
76 if v is None:
77 if v is None:
77 v = self.defaults[k]
78 v = self.defaults[k]
78 setattr(self, k, v)
79 setattr(self, k, v)
79
80
80 try:
81 try:
81 self.context = int(self.context)
82 self.context = int(self.context)
82 except ValueError:
83 except ValueError:
83 raise error.Abort(_('diff context lines count must be '
84 raise error.Abort(_('diff context lines count must be '
84 'an integer, not %r') % self.context)
85 'an integer, not %r') % self.context)
85
86
86 def copy(self, **kwargs):
87 def copy(self, **kwargs):
87 opts = dict((k, getattr(self, k)) for k in self.defaults)
88 opts = dict((k, getattr(self, k)) for k in self.defaults)
88 opts = pycompat.strkwargs(opts)
89 opts = pycompat.strkwargs(opts)
89 opts.update(kwargs)
90 opts.update(kwargs)
90 return diffopts(**opts)
91 return diffopts(**opts)
91
92
92 defaultopts = diffopts()
93 defaultopts = diffopts()
93
94
94 def wsclean(opts, text, blank=True):
95 def wsclean(opts, text, blank=True):
95 if opts.ignorews:
96 if opts.ignorews:
96 text = bdiff.fixws(text, 1)
97 text = bdiff.fixws(text, 1)
97 elif opts.ignorewsamount:
98 elif opts.ignorewsamount:
98 text = bdiff.fixws(text, 0)
99 text = bdiff.fixws(text, 0)
99 if blank and opts.ignoreblanklines:
100 if blank and opts.ignoreblanklines:
100 text = re.sub('\n+', '\n', text).strip('\n')
101 text = re.sub('\n+', '\n', text).strip('\n')
101 if opts.ignorewseol:
102 if opts.ignorewseol:
102 text = re.sub(r'[ \t\r\f]+\n', r'\n', text)
103 text = re.sub(r'[ \t\r\f]+\n', r'\n', text)
103 return text
104 return text
104
105
105 def splitblock(base1, lines1, base2, lines2, opts):
106 def splitblock(base1, lines1, base2, lines2, opts):
106 # The input lines match except for interwoven blank lines. We
107 # The input lines match except for interwoven blank lines. We
107 # transform it into a sequence of matching blocks and blank blocks.
108 # transform it into a sequence of matching blocks and blank blocks.
108 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
109 lines1 = [(wsclean(opts, l) and 1 or 0) for l in lines1]
109 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
110 lines2 = [(wsclean(opts, l) and 1 or 0) for l in lines2]
110 s1, e1 = 0, len(lines1)
111 s1, e1 = 0, len(lines1)
111 s2, e2 = 0, len(lines2)
112 s2, e2 = 0, len(lines2)
112 while s1 < e1 or s2 < e2:
113 while s1 < e1 or s2 < e2:
113 i1, i2, btype = s1, s2, '='
114 i1, i2, btype = s1, s2, '='
114 if (i1 >= e1 or lines1[i1] == 0
115 if (i1 >= e1 or lines1[i1] == 0
115 or i2 >= e2 or lines2[i2] == 0):
116 or i2 >= e2 or lines2[i2] == 0):
116 # Consume the block of blank lines
117 # Consume the block of blank lines
117 btype = '~'
118 btype = '~'
118 while i1 < e1 and lines1[i1] == 0:
119 while i1 < e1 and lines1[i1] == 0:
119 i1 += 1
120 i1 += 1
120 while i2 < e2 and lines2[i2] == 0:
121 while i2 < e2 and lines2[i2] == 0:
121 i2 += 1
122 i2 += 1
122 else:
123 else:
123 # Consume the matching lines
124 # Consume the matching lines
124 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
125 while i1 < e1 and lines1[i1] == 1 and lines2[i2] == 1:
125 i1 += 1
126 i1 += 1
126 i2 += 1
127 i2 += 1
127 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
128 yield [base1 + s1, base1 + i1, base2 + s2, base2 + i2], btype
128 s1 = i1
129 s1 = i1
129 s2 = i2
130 s2 = i2
130
131
131 def hunkinrange(hunk, linerange):
132 def hunkinrange(hunk, linerange):
132 """Return True if `hunk` defined as (start, length) is in `linerange`
133 """Return True if `hunk` defined as (start, length) is in `linerange`
133 defined as (lowerbound, upperbound).
134 defined as (lowerbound, upperbound).
134
135
135 >>> hunkinrange((5, 10), (2, 7))
136 >>> hunkinrange((5, 10), (2, 7))
136 True
137 True
137 >>> hunkinrange((5, 10), (6, 12))
138 >>> hunkinrange((5, 10), (6, 12))
138 True
139 True
139 >>> hunkinrange((5, 10), (13, 17))
140 >>> hunkinrange((5, 10), (13, 17))
140 True
141 True
141 >>> hunkinrange((5, 10), (3, 17))
142 >>> hunkinrange((5, 10), (3, 17))
142 True
143 True
143 >>> hunkinrange((5, 10), (1, 3))
144 >>> hunkinrange((5, 10), (1, 3))
144 False
145 False
145 >>> hunkinrange((5, 10), (18, 20))
146 >>> hunkinrange((5, 10), (18, 20))
146 False
147 False
147 >>> hunkinrange((5, 10), (1, 5))
148 >>> hunkinrange((5, 10), (1, 5))
148 False
149 False
149 >>> hunkinrange((5, 10), (15, 27))
150 >>> hunkinrange((5, 10), (15, 27))
150 False
151 False
151 """
152 """
152 start, length = hunk
153 start, length = hunk
153 lowerbound, upperbound = linerange
154 lowerbound, upperbound = linerange
154 return lowerbound < start + length and start < upperbound
155 return lowerbound < start + length and start < upperbound
155
156
156 def blocksinrange(blocks, rangeb):
157 def blocksinrange(blocks, rangeb):
157 """filter `blocks` like (a1, a2, b1, b2) from items outside line range
158 """filter `blocks` like (a1, a2, b1, b2) from items outside line range
158 `rangeb` from ``(b1, b2)`` point of view.
159 `rangeb` from ``(b1, b2)`` point of view.
159
160
160 Return `filteredblocks, rangea` where:
161 Return `filteredblocks, rangea` where:
161
162
162 * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
163 * `filteredblocks` is list of ``block = (a1, a2, b1, b2), stype`` items of
163 `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
164 `blocks` that are inside `rangeb` from ``(b1, b2)`` point of view; a
164 block ``(b1, b2)`` being inside `rangeb` if
165 block ``(b1, b2)`` being inside `rangeb` if
165 ``rangeb[0] < b2 and b1 < rangeb[1]``;
166 ``rangeb[0] < b2 and b1 < rangeb[1]``;
166 * `rangea` is the line range w.r.t. ``(a1, a2)`` parts of `blocks`.
167 * `rangea` is the line range w.r.t. ``(a1, a2)`` parts of `blocks`.
167 """
168 """
168 lbb, ubb = rangeb
169 lbb, ubb = rangeb
169 lba, uba = None, None
170 lba, uba = None, None
170 filteredblocks = []
171 filteredblocks = []
171 for block in blocks:
172 for block in blocks:
172 (a1, a2, b1, b2), stype = block
173 (a1, a2, b1, b2), stype = block
173 if lbb >= b1 and ubb <= b2 and stype == '=':
174 if lbb >= b1 and ubb <= b2 and stype == '=':
174 # rangeb is within a single "=" hunk, restrict back linerange1
175 # rangeb is within a single "=" hunk, restrict back linerange1
175 # by offsetting rangeb
176 # by offsetting rangeb
176 lba = lbb - b1 + a1
177 lba = lbb - b1 + a1
177 uba = ubb - b1 + a1
178 uba = ubb - b1 + a1
178 else:
179 else:
179 if b1 <= lbb < b2:
180 if b1 <= lbb < b2:
180 if stype == '=':
181 if stype == '=':
181 lba = a2 - (b2 - lbb)
182 lba = a2 - (b2 - lbb)
182 else:
183 else:
183 lba = a1
184 lba = a1
184 if b1 < ubb <= b2:
185 if b1 < ubb <= b2:
185 if stype == '=':
186 if stype == '=':
186 uba = a1 + (ubb - b1)
187 uba = a1 + (ubb - b1)
187 else:
188 else:
188 uba = a2
189 uba = a2
189 if hunkinrange((b1, (b2 - b1)), rangeb):
190 if hunkinrange((b1, (b2 - b1)), rangeb):
190 filteredblocks.append(block)
191 filteredblocks.append(block)
191 if lba is None or uba is None or uba < lba:
192 if lba is None or uba is None or uba < lba:
192 raise error.Abort(_('line range exceeds file size'))
193 raise error.Abort(_('line range exceeds file size'))
193 return filteredblocks, (lba, uba)
194 return filteredblocks, (lba, uba)
194
195
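# Editorial aside, not part of mdiff.py (assumes this module is importable
# as mercurial.mdiff): with a single '=' block covering lines 0-10 on both
# sides, blocksinrange() keeps the block and maps a b-side line range
# straight back to the same a-side range.
from mercurial import mdiff
demo_blocks = [((0, 10, 0, 10), '=')]
assert mdiff.blocksinrange(demo_blocks, (3, 5)) == ([((0, 10, 0, 10), '=')], (3, 5))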
195 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
196 def allblocks(text1, text2, opts=None, lines1=None, lines2=None):
196 """Return (block, type) tuples, where block is an mdiff.blocks
197 """Return (block, type) tuples, where block is an mdiff.blocks
197 line entry. type is '=' for blocks matching exactly one another
198 line entry. type is '=' for blocks matching exactly one another
198 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
199 (bdiff blocks), '!' for non-matching blocks and '~' for blocks
199 matching only after having filtered blank lines.
200 matching only after having filtered blank lines.
200 lines1 and lines2 are text1 and text2 split with splitnewlines() if
201 lines1 and lines2 are text1 and text2 split with splitnewlines() if
201 they are already available.
202 they are already available.
202 """
203 """
203 if opts is None:
204 if opts is None:
204 opts = defaultopts
205 opts = defaultopts
205 if opts.ignorews or opts.ignorewsamount or opts.ignorewseol:
206 if opts.ignorews or opts.ignorewsamount or opts.ignorewseol:
206 text1 = wsclean(opts, text1, False)
207 text1 = wsclean(opts, text1, False)
207 text2 = wsclean(opts, text2, False)
208 text2 = wsclean(opts, text2, False)
208 diff = bdiff.blocks(text1, text2)
209 diff = bdiff.blocks(text1, text2)
209 for i, s1 in enumerate(diff):
210 for i, s1 in enumerate(diff):
210 # The first match is special.
211 # The first match is special.
211 # we've either found a match starting at line 0 or a match later
212 # we've either found a match starting at line 0 or a match later
212 # in the file. If it starts later, old and new below will both be
213 # in the file. If it starts later, old and new below will both be
213 # empty and we'll continue to the next match.
214 # empty and we'll continue to the next match.
214 if i > 0:
215 if i > 0:
215 s = diff[i - 1]
216 s = diff[i - 1]
216 else:
217 else:
217 s = [0, 0, 0, 0]
218 s = [0, 0, 0, 0]
218 s = [s[1], s1[0], s[3], s1[2]]
219 s = [s[1], s1[0], s[3], s1[2]]
219
220
220 # bdiff sometimes gives huge matches past eof; this check eats them,
221 # bdiff sometimes gives huge matches past eof; this check eats them,
221 # and deals with the special first match case described above
222 # and deals with the special first match case described above
222 if s[0] != s[1] or s[2] != s[3]:
223 if s[0] != s[1] or s[2] != s[3]:
223 type = '!'
224 type = '!'
224 if opts.ignoreblanklines:
225 if opts.ignoreblanklines:
225 if lines1 is None:
226 if lines1 is None:
226 lines1 = splitnewlines(text1)
227 lines1 = splitnewlines(text1)
227 if lines2 is None:
228 if lines2 is None:
228 lines2 = splitnewlines(text2)
229 lines2 = splitnewlines(text2)
229 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
230 old = wsclean(opts, "".join(lines1[s[0]:s[1]]))
230 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
231 new = wsclean(opts, "".join(lines2[s[2]:s[3]]))
231 if old == new:
232 if old == new:
232 type = '~'
233 type = '~'
233 yield s, type
234 yield s, type
234 yield s1, '='
235 yield s1, '='
235
236
236 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
237 def unidiff(a, ad, b, bd, fn1, fn2, opts=defaultopts):
237 """Return a unified diff as a (headers, hunks) tuple.
238 """Return a unified diff as a (headers, hunks) tuple.
238
239
239 If the diff is not null, `headers` is a list with unified diff header
240 If the diff is not null, `headers` is a list with unified diff header
240 lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
241 lines "--- <original>" and "+++ <new>" and `hunks` is a generator yielding
241 (hunkrange, hunklines) coming from _unidiff().
242 (hunkrange, hunklines) coming from _unidiff().
242 Otherwise, `headers` and `hunks` are empty.
243 Otherwise, `headers` and `hunks` are empty.
243 """
244 """
244 def datetag(date, fn=None):
245 def datetag(date, fn=None):
245 if not opts.git and not opts.nodates:
246 if not opts.git and not opts.nodates:
246 return '\t%s' % date
247 return '\t%s' % date
247 if fn and ' ' in fn:
248 if fn and ' ' in fn:
248 return '\t'
249 return '\t'
249 return ''
250 return ''
250
251
251 sentinel = [], ()
252 sentinel = [], ()
252 if not a and not b:
253 if not a and not b:
253 return sentinel
254 return sentinel
254
255
255 if opts.noprefix:
256 if opts.noprefix:
256 aprefix = bprefix = ''
257 aprefix = bprefix = ''
257 else:
258 else:
258 aprefix = 'a/'
259 aprefix = 'a/'
259 bprefix = 'b/'
260 bprefix = 'b/'
260
261
261 epoch = util.datestr((0, 0))
262 epoch = util.datestr((0, 0))
262
263
263 fn1 = util.pconvert(fn1)
264 fn1 = util.pconvert(fn1)
264 fn2 = util.pconvert(fn2)
265 fn2 = util.pconvert(fn2)
265
266
266 def checknonewline(lines):
267 def checknonewline(lines):
267 for text in lines:
268 for text in lines:
268 if text[-1:] != '\n':
269 if text[-1:] != '\n':
269 text += "\n\ No newline at end of file\n"
270 text += "\n\ No newline at end of file\n"
270 yield text
271 yield text
271
272
272 if not opts.text and (util.binary(a) or util.binary(b)):
273 if not opts.text and (util.binary(a) or util.binary(b)):
273 if a and b and len(a) == len(b) and a == b:
274 if a and b and len(a) == len(b) and a == b:
274 return sentinel
275 return sentinel
275 headerlines = []
276 headerlines = []
276 hunks = (None, ['Binary file %s has changed\n' % fn1]),
277 hunks = (None, ['Binary file %s has changed\n' % fn1]),
277 elif not a:
278 elif not a:
278 b = splitnewlines(b)
279 b = splitnewlines(b)
279 if a is None:
280 if a is None:
280 l1 = '--- /dev/null%s' % datetag(epoch)
281 l1 = '--- /dev/null%s' % datetag(epoch)
281 else:
282 else:
282 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
283 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
283 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
284 l2 = "+++ %s%s" % (bprefix + fn2, datetag(bd, fn2))
284 headerlines = [l1, l2]
285 headerlines = [l1, l2]
285 size = len(b)
286 size = len(b)
286 hunkrange = (0, 0, 1, size)
287 hunkrange = (0, 0, 1, size)
287 hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
288 hunklines = ["@@ -0,0 +1,%d @@\n" % size] + ["+" + e for e in b]
288 hunks = (hunkrange, checknonewline(hunklines)),
289 hunks = (hunkrange, checknonewline(hunklines)),
289 elif not b:
290 elif not b:
290 a = splitnewlines(a)
291 a = splitnewlines(a)
291 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
292 l1 = "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1))
292 if b is None:
293 if b is None:
293 l2 = '+++ /dev/null%s' % datetag(epoch)
294 l2 = '+++ /dev/null%s' % datetag(epoch)
294 else:
295 else:
295 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
296 l2 = "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2))
296 headerlines = [l1, l2]
297 headerlines = [l1, l2]
297 size = len(a)
298 size = len(a)
298 hunkrange = (1, size, 0, 0)
299 hunkrange = (1, size, 0, 0)
299 hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
300 hunklines = ["@@ -1,%d +0,0 @@\n" % size] + ["-" + e for e in a]
300 hunks = (hunkrange, checknonewline(hunklines)),
301 hunks = (hunkrange, checknonewline(hunklines)),
301 else:
302 else:
302 diffhunks = _unidiff(a, b, opts=opts)
303 diffhunks = _unidiff(a, b, opts=opts)
303 try:
304 try:
304 hunkrange, hunklines = next(diffhunks)
305 hunkrange, hunklines = next(diffhunks)
305 except StopIteration:
306 except StopIteration:
306 return sentinel
307 return sentinel
307
308
308 headerlines = [
309 headerlines = [
309 "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
310 "--- %s%s%s" % (aprefix, fn1, datetag(ad, fn1)),
310 "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
311 "+++ %s%s%s" % (bprefix, fn2, datetag(bd, fn2)),
311 ]
312 ]
312 def rewindhunks():
313 def rewindhunks():
313 yield hunkrange, checknonewline(hunklines)
314 yield hunkrange, checknonewline(hunklines)
314 for hr, hl in diffhunks:
315 for hr, hl in diffhunks:
315 yield hr, checknonewline(hl)
316 yield hr, checknonewline(hl)
316
317
317 hunks = rewindhunks()
318 hunks = rewindhunks()
318
319
319 return headerlines, hunks
320 return headerlines, hunks
320
321
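# Editorial usage sketch, not part of mdiff.py (assumes this Python 2 era
# module is importable as mercurial.mdiff; the expected values below follow
# the code above but are not verified output): changing one line of a
# two-line file yields the header pair plus a single hunk from unidiff().
from mercurial import mdiff
headers, hunks = mdiff.unidiff('a\nb\n', '', 'a\nc\n', '', 'f1', 'f2')
for hunkrange, hunklines in hunks:
    print(hunkrange)           # expected: (1, 2, 1, 2)
    print(''.join(hunklines))  # expected: "@@ -1,2 +1,2 @@\n a\n-b\n+c\n"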
321 def _unidiff(t1, t2, opts=defaultopts):
322 def _unidiff(t1, t2, opts=defaultopts):
322 """Yield hunks of a headerless unified diff from t1 and t2 texts.
323 """Yield hunks of a headerless unified diff from t1 and t2 texts.
323
324
324 Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
325 Each hunk consists of a (hunkrange, hunklines) tuple where `hunkrange` is a
325 tuple (s1, l1, s2, l2) representing the range information of the hunk to
326 tuple (s1, l1, s2, l2) representing the range information of the hunk to
326 form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
327 form the '@@ -s1,l1 +s2,l2 @@' header and `hunklines` is a list of lines
327 of the hunk combining said header followed by line additions and
328 of the hunk combining said header followed by line additions and
328 deletions.
329 deletions.
329 """
330 """
330 l1 = splitnewlines(t1)
331 l1 = splitnewlines(t1)
331 l2 = splitnewlines(t2)
332 l2 = splitnewlines(t2)
332 def contextend(l, len):
333 def contextend(l, len):
333 ret = l + opts.context
334 ret = l + opts.context
334 if ret > len:
335 if ret > len:
335 ret = len
336 ret = len
336 return ret
337 return ret
337
338
338 def contextstart(l):
339 def contextstart(l):
339 ret = l - opts.context
340 ret = l - opts.context
340 if ret < 0:
341 if ret < 0:
341 return 0
342 return 0
342 return ret
343 return ret
343
344
344 lastfunc = [0, '']
345 lastfunc = [0, '']
345 def yieldhunk(hunk):
346 def yieldhunk(hunk):
346 (astart, a2, bstart, b2, delta) = hunk
347 (astart, a2, bstart, b2, delta) = hunk
347 aend = contextend(a2, len(l1))
348 aend = contextend(a2, len(l1))
348 alen = aend - astart
349 alen = aend - astart
349 blen = b2 - bstart + aend - a2
350 blen = b2 - bstart + aend - a2
350
351
351 func = ""
352 func = ""
352 if opts.showfunc:
353 if opts.showfunc:
353 lastpos, func = lastfunc
354 lastpos, func = lastfunc
354 # walk backwards from the start of the context up to the start of
355 # walk backwards from the start of the context up to the start of
355 # the previous hunk context until we find a line starting with an
356 # the previous hunk context until we find a line starting with an
356 # alphanumeric char.
357 # alphanumeric char.
357 for i in xrange(astart - 1, lastpos - 1, -1):
358 for i in xrange(astart - 1, lastpos - 1, -1):
358 if l1[i][0].isalnum():
359 if l1[i][0].isalnum():
359 func = ' ' + l1[i].rstrip()[:40]
360 func = ' ' + l1[i].rstrip()[:40]
360 lastfunc[1] = func
361 lastfunc[1] = func
361 break
362 break
362 # by recording this hunk's starting point as the next place to
363 # by recording this hunk's starting point as the next place to
363 # start looking for function lines, we avoid reading any line in
364 # start looking for function lines, we avoid reading any line in
364 # the file more than once.
365 # the file more than once.
365 lastfunc[0] = astart
366 lastfunc[0] = astart
366
367
367 # zero-length hunk ranges report their start line as one less
368 # zero-length hunk ranges report their start line as one less
368 if alen:
369 if alen:
369 astart += 1
370 astart += 1
370 if blen:
371 if blen:
371 bstart += 1
372 bstart += 1
372
373
373 hunkrange = astart, alen, bstart, blen
374 hunkrange = astart, alen, bstart, blen
374 hunklines = (
375 hunklines = (
375 ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
376 ["@@ -%d,%d +%d,%d @@%s\n" % (hunkrange + (func,))]
376 + delta
377 + delta
377 + [' ' + l1[x] for x in xrange(a2, aend)]
378 + [' ' + l1[x] for x in xrange(a2, aend)]
378 )
379 )
379 yield hunkrange, hunklines
380 yield hunkrange, hunklines
380
381
381 # bdiff.blocks gives us the matching sequences in the files. The loop
382 # bdiff.blocks gives us the matching sequences in the files. The loop
382 # below finds the spaces between those matching sequences and translates
383 # below finds the spaces between those matching sequences and translates
383 # them into diff output.
384 # them into diff output.
384 #
385 #
385 hunk = None
386 hunk = None
386 ignoredlines = 0
387 ignoredlines = 0
387 for s, stype in allblocks(t1, t2, opts, l1, l2):
388 for s, stype in allblocks(t1, t2, opts, l1, l2):
388 a1, a2, b1, b2 = s
389 a1, a2, b1, b2 = s
389 if stype != '!':
390 if stype != '!':
390 if stype == '~':
391 if stype == '~':
391 # The diff context lines are based on t1 content. When
392 # The diff context lines are based on t1 content. When
392 # blank lines are ignored, the new-side line offsets must
393 # blank lines are ignored, the new-side line offsets must
393 # be adjusted as if equivalent blocks ('~') had the
394 # be adjusted as if equivalent blocks ('~') had the
394 # same sizes on both sides.
395 # same sizes on both sides.
395 ignoredlines += (b2 - b1) - (a2 - a1)
396 ignoredlines += (b2 - b1) - (a2 - a1)
396 continue
397 continue
397 delta = []
398 delta = []
398 old = l1[a1:a2]
399 old = l1[a1:a2]
399 new = l2[b1:b2]
400 new = l2[b1:b2]
400
401
401 b1 -= ignoredlines
402 b1 -= ignoredlines
402 b2 -= ignoredlines
403 b2 -= ignoredlines
403 astart = contextstart(a1)
404 astart = contextstart(a1)
404 bstart = contextstart(b1)
405 bstart = contextstart(b1)
405 prev = None
406 prev = None
406 if hunk:
407 if hunk:
407 # join with the previous hunk if it falls inside the context
408 # join with the previous hunk if it falls inside the context
408 if astart < hunk[1] + opts.context + 1:
409 if astart < hunk[1] + opts.context + 1:
409 prev = hunk
410 prev = hunk
410 astart = hunk[1]
411 astart = hunk[1]
411 bstart = hunk[3]
412 bstart = hunk[3]
412 else:
413 else:
413 for x in yieldhunk(hunk):
414 for x in yieldhunk(hunk):
414 yield x
415 yield x
415 if prev:
416 if prev:
416 # we've joined the previous hunk, record the new ending points.
417 # we've joined the previous hunk, record the new ending points.
417 hunk[1] = a2
418 hunk[1] = a2
418 hunk[3] = b2
419 hunk[3] = b2
419 delta = hunk[4]
420 delta = hunk[4]
420 else:
421 else:
421 # create a new hunk
422 # create a new hunk
422 hunk = [astart, a2, bstart, b2, delta]
423 hunk = [astart, a2, bstart, b2, delta]
423
424
424 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
425 delta[len(delta):] = [' ' + x for x in l1[astart:a1]]
425 delta[len(delta):] = ['-' + x for x in old]
426 delta[len(delta):] = ['-' + x for x in old]
426 delta[len(delta):] = ['+' + x for x in new]
427 delta[len(delta):] = ['+' + x for x in new]
427
428
428 if hunk:
429 if hunk:
429 for x in yieldhunk(hunk):
430 for x in yieldhunk(hunk):
430 yield x
431 yield x
431
432
432 def b85diff(to, tn):
433 def b85diff(to, tn):
433 '''print base85-encoded binary diff'''
434 '''print base85-encoded binary diff'''
434 def fmtline(line):
435 def fmtline(line):
435 l = len(line)
436 l = len(line)
436 if l <= 26:
437 if l <= 26:
437 l = chr(ord('A') + l - 1)
438 l = chr(ord('A') + l - 1)
438 else:
439 else:
439 l = chr(l - 26 + ord('a') - 1)
440 l = chr(l - 26 + ord('a') - 1)
440 return '%c%s\n' % (l, util.b85encode(line, True))
441 return '%c%s\n' % (l, util.b85encode(line, True))
441
442
442 def chunk(text, csize=52):
443 def chunk(text, csize=52):
443 l = len(text)
444 l = len(text)
444 i = 0
445 i = 0
445 while i < l:
446 while i < l:
446 yield text[i:i + csize]
447 yield text[i:i + csize]
447 i += csize
448 i += csize
448
449
449 if to is None:
450 if to is None:
450 to = ''
451 to = ''
451 if tn is None:
452 if tn is None:
452 tn = ''
453 tn = ''
453
454
454 if to == tn:
455 if to == tn:
455 return ''
456 return ''
456
457
457 # TODO: deltas
458 # TODO: deltas
458 ret = []
459 ret = []
459 ret.append('GIT binary patch\n')
460 ret.append('GIT binary patch\n')
460 ret.append('literal %d\n' % len(tn))
461 ret.append('literal %d\n' % len(tn))
461 for l in chunk(zlib.compress(tn)):
462 for l in chunk(zlib.compress(tn)):
462 ret.append(fmtline(l))
463 ret.append(fmtline(l))
463 ret.append('\n')
464 ret.append('\n')
464
465
465 return ''.join(ret)
466 return ''.join(ret)
466
467
467 def patchtext(bin):
468 def patchtext(bin):
468 pos = 0
469 pos = 0
469 t = []
470 t = []
470 while pos < len(bin):
471 while pos < len(bin):
471 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
472 p1, p2, l = struct.unpack(">lll", bin[pos:pos + 12])
472 pos += 12
473 pos += 12
473 t.append(bin[pos:pos + l])
474 t.append(bin[pos:pos + l])
474 pos += l
475 pos += l
475 return "".join(t)
476 return "".join(t)
476
477
477 def patch(a, bin):
478 def patch(a, bin):
478 if len(a) == 0:
479 if len(a) == 0:
479 # skip over trivial delta header
480 # skip over trivial delta header
480 return util.buffer(bin, 12)
481 return util.buffer(bin, 12)
481 return mpatch.patches(a, [bin])
482 return mpatch.patches(a, [bin])
482
483
483 # similar to difflib.SequenceMatcher.get_matching_blocks
484 # similar to difflib.SequenceMatcher.get_matching_blocks
484 def get_matching_blocks(a, b):
485 def get_matching_blocks(a, b):
485 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
486 return [(d[0], d[2], d[1] - d[0]) for d in bdiff.blocks(a, b)]
486
487
487 def trivialdiffheader(length):
488 def trivialdiffheader(length):
488 return struct.pack(">lll", 0, 0, length) if length else ''
489 return struct.pack(">lll", 0, 0, length) if length else ''
489
490
490 def replacediffheader(oldlen, newlen):
491 def replacediffheader(oldlen, newlen):
491 return struct.pack(">lll", 0, oldlen, newlen)
492 return struct.pack(">lll", 0, oldlen, newlen)
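
trivialdiffheader(), replacediffheader() and patchtext() above all share one fragment layout: a 12-byte big-endian (start, end, length) header packed with ">lll", followed by length bytes of replacement text. A self-contained sketch of that layout (helper names are illustrative only):

    import struct

    def make_trivial_delta(newtext):
        # replace bytes [0, 0) of the original, i.e. prepend newtext
        return struct.pack(">lll", 0, 0, len(newtext)) + newtext

    def inserted_text(delta):
        # mirrors patchtext(): walk the fragments, collect their payloads
        pos, out = 0, []
        while pos < len(delta):
            p1, p2, l = struct.unpack(">lll", delta[pos:pos + 12])
            pos += 12
            out.append(delta[pos:pos + l])
            pos += l
        return b"".join(out)

    assert inserted_text(make_trivial_delta(b"hello")) == b"hello"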
@@ -1,2816 +1,2895 b''
1 # patch.py - patch file parsing routines
1 # patch.py - patch file parsing routines
2 #
2 #
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
3 # Copyright 2006 Brendan Cully <brendan@kublai.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
4 # Copyright 2007 Chris Mason <chris.mason@oracle.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import, print_function
9 from __future__ import absolute_import, print_function
10
10
11 import collections
11 import collections
12 import copy
12 import copy
13 import difflib
13 import email
14 import email
14 import errno
15 import errno
15 import hashlib
16 import hashlib
16 import os
17 import os
17 import posixpath
18 import posixpath
18 import re
19 import re
19 import shutil
20 import shutil
20 import tempfile
21 import tempfile
21 import zlib
22 import zlib
22
23
23 from .i18n import _
24 from .i18n import _
24 from .node import (
25 from .node import (
25 hex,
26 hex,
26 short,
27 short,
27 )
28 )
28 from . import (
29 from . import (
29 copies,
30 copies,
30 encoding,
31 encoding,
31 error,
32 error,
32 mail,
33 mail,
33 mdiff,
34 mdiff,
34 pathutil,
35 pathutil,
35 policy,
36 policy,
36 pycompat,
37 pycompat,
37 scmutil,
38 scmutil,
38 similar,
39 similar,
39 util,
40 util,
40 vfs as vfsmod,
41 vfs as vfsmod,
41 )
42 )
42
43
43 diffhelpers = policy.importmod(r'diffhelpers')
44 diffhelpers = policy.importmod(r'diffhelpers')
44 stringio = util.stringio
45 stringio = util.stringio
45
46
46 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 gitre = re.compile(br'diff --git a/(.*) b/(.*)')
47 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48 tabsplitter = re.compile(br'(\t+|[^\t]+)')
48
49
49 PatchError = error.PatchError
50 PatchError = error.PatchError
50
51
51 # public functions
52 # public functions
52
53
53 def split(stream):
54 def split(stream):
54 '''return an iterator of individual patches from a stream'''
55 '''return an iterator of individual patches from a stream'''
55 def isheader(line, inheader):
56 def isheader(line, inheader):
56 if inheader and line[0] in (' ', '\t'):
57 if inheader and line[0] in (' ', '\t'):
57 # continuation
58 # continuation
58 return True
59 return True
59 if line[0] in (' ', '-', '+'):
60 if line[0] in (' ', '-', '+'):
60 # diff line - don't check for header pattern in there
61 # diff line - don't check for header pattern in there
61 return False
62 return False
62 l = line.split(': ', 1)
63 l = line.split(': ', 1)
63 return len(l) == 2 and ' ' not in l[0]
64 return len(l) == 2 and ' ' not in l[0]
64
65
65 def chunk(lines):
66 def chunk(lines):
66 return stringio(''.join(lines))
67 return stringio(''.join(lines))
67
68
68 def hgsplit(stream, cur):
69 def hgsplit(stream, cur):
69 inheader = True
70 inheader = True
70
71
71 for line in stream:
72 for line in stream:
72 if not line.strip():
73 if not line.strip():
73 inheader = False
74 inheader = False
74 if not inheader and line.startswith('# HG changeset patch'):
75 if not inheader and line.startswith('# HG changeset patch'):
75 yield chunk(cur)
76 yield chunk(cur)
76 cur = []
77 cur = []
77 inheader = True
78 inheader = True
78
79
79 cur.append(line)
80 cur.append(line)
80
81
81 if cur:
82 if cur:
82 yield chunk(cur)
83 yield chunk(cur)
83
84
84 def mboxsplit(stream, cur):
85 def mboxsplit(stream, cur):
85 for line in stream:
86 for line in stream:
86 if line.startswith('From '):
87 if line.startswith('From '):
87 for c in split(chunk(cur[1:])):
88 for c in split(chunk(cur[1:])):
88 yield c
89 yield c
89 cur = []
90 cur = []
90
91
91 cur.append(line)
92 cur.append(line)
92
93
93 if cur:
94 if cur:
94 for c in split(chunk(cur[1:])):
95 for c in split(chunk(cur[1:])):
95 yield c
96 yield c
96
97
97 def mimesplit(stream, cur):
98 def mimesplit(stream, cur):
98 def msgfp(m):
99 def msgfp(m):
99 fp = stringio()
100 fp = stringio()
100 g = email.Generator.Generator(fp, mangle_from_=False)
101 g = email.Generator.Generator(fp, mangle_from_=False)
101 g.flatten(m)
102 g.flatten(m)
102 fp.seek(0)
103 fp.seek(0)
103 return fp
104 return fp
104
105
105 for line in stream:
106 for line in stream:
106 cur.append(line)
107 cur.append(line)
107 c = chunk(cur)
108 c = chunk(cur)
108
109
109 m = email.Parser.Parser().parse(c)
110 m = email.Parser.Parser().parse(c)
110 if not m.is_multipart():
111 if not m.is_multipart():
111 yield msgfp(m)
112 yield msgfp(m)
112 else:
113 else:
113 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
114 for part in m.walk():
115 for part in m.walk():
115 ct = part.get_content_type()
116 ct = part.get_content_type()
116 if ct not in ok_types:
117 if ct not in ok_types:
117 continue
118 continue
118 yield msgfp(part)
119 yield msgfp(part)
119
120
120 def headersplit(stream, cur):
121 def headersplit(stream, cur):
121 inheader = False
122 inheader = False
122
123
123 for line in stream:
124 for line in stream:
124 if not inheader and isheader(line, inheader):
125 if not inheader and isheader(line, inheader):
125 yield chunk(cur)
126 yield chunk(cur)
126 cur = []
127 cur = []
127 inheader = True
128 inheader = True
128 if inheader and not isheader(line, inheader):
129 if inheader and not isheader(line, inheader):
129 inheader = False
130 inheader = False
130
131
131 cur.append(line)
132 cur.append(line)
132
133
133 if cur:
134 if cur:
134 yield chunk(cur)
135 yield chunk(cur)
135
136
136 def remainder(cur):
137 def remainder(cur):
137 yield chunk(cur)
138 yield chunk(cur)
138
139
139 class fiter(object):
140 class fiter(object):
140 def __init__(self, fp):
141 def __init__(self, fp):
141 self.fp = fp
142 self.fp = fp
142
143
143 def __iter__(self):
144 def __iter__(self):
144 return self
145 return self
145
146
146 def next(self):
147 def next(self):
147 l = self.fp.readline()
148 l = self.fp.readline()
148 if not l:
149 if not l:
149 raise StopIteration
150 raise StopIteration
150 return l
151 return l
151
152
152 __next__ = next
153 __next__ = next
153
154
154 inheader = False
155 inheader = False
155 cur = []
156 cur = []
156
157
157 mimeheaders = ['content-type']
158 mimeheaders = ['content-type']
158
159
159 if not util.safehasattr(stream, 'next'):
160 if not util.safehasattr(stream, 'next'):
160 # http responses, for example, have readline but not next
161 # http responses, for example, have readline but not next
161 stream = fiter(stream)
162 stream = fiter(stream)
162
163
163 for line in stream:
164 for line in stream:
164 cur.append(line)
165 cur.append(line)
165 if line.startswith('# HG changeset patch'):
166 if line.startswith('# HG changeset patch'):
166 return hgsplit(stream, cur)
167 return hgsplit(stream, cur)
167 elif line.startswith('From '):
168 elif line.startswith('From '):
168 return mboxsplit(stream, cur)
169 return mboxsplit(stream, cur)
169 elif isheader(line, inheader):
170 elif isheader(line, inheader):
170 inheader = True
171 inheader = True
171 if line.split(':', 1)[0].lower() in mimeheaders:
172 if line.split(':', 1)[0].lower() in mimeheaders:
172 # let email parser handle this
173 # let email parser handle this
173 return mimesplit(stream, cur)
174 return mimesplit(stream, cur)
174 elif line.startswith('--- ') and inheader:
175 elif line.startswith('--- ') and inheader:
175 # No evil headers seen by diff start, split by hand
176 # No evil headers seen by diff start, split by hand
176 return headersplit(stream, cur)
177 return headersplit(stream, cur)
177 # Not enough info, keep reading
178 # Not enough info, keep reading
178
179
179 # if we are here, we have a very plain patch
180 # if we are here, we have a very plain patch
180 return remainder(cur)
181 return remainder(cur)
181
182
182 ## Some facility for extensible patch parsing:
183 ## Some facility for extensible patch parsing:
183 # list of pairs ("header to match", "data key")
184 # list of pairs ("header to match", "data key")
184 patchheadermap = [('Date', 'date'),
185 patchheadermap = [('Date', 'date'),
185 ('Branch', 'branch'),
186 ('Branch', 'branch'),
186 ('Node ID', 'nodeid'),
187 ('Node ID', 'nodeid'),
187 ]
188 ]
188
189
189 def extract(ui, fileobj):
190 def extract(ui, fileobj):
190 '''extract patch from data read from fileobj.
191 '''extract patch from data read from fileobj.
191
192
192 patch can be a normal patch or contained in an email message.
193 patch can be a normal patch or contained in an email message.
193
194
194 return a dictionary. Standard keys are:
195 return a dictionary. Standard keys are:
195 - filename,
196 - filename,
196 - message,
197 - message,
197 - user,
198 - user,
198 - date,
199 - date,
199 - branch,
200 - branch,
200 - node,
201 - node,
201 - p1,
202 - p1,
202 - p2.
203 - p2.
203 Any item can be missing from the dictionary. If filename is missing,
204 Any item can be missing from the dictionary. If filename is missing,
204 fileobj did not contain a patch. Caller must unlink filename when done.'''
205 fileobj did not contain a patch. Caller must unlink filename when done.'''
205
206
206 # attempt to detect the start of a patch
207 # attempt to detect the start of a patch
207 # (this heuristic is borrowed from quilt)
208 # (this heuristic is borrowed from quilt)
208 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
209 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
209 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
210 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
210 br'---[ \t].*?^\+\+\+[ \t]|'
211 br'---[ \t].*?^\+\+\+[ \t]|'
211 br'\*\*\*[ \t].*?^---[ \t])',
212 br'\*\*\*[ \t].*?^---[ \t])',
212 re.MULTILINE | re.DOTALL)
213 re.MULTILINE | re.DOTALL)
213
214
214 data = {}
215 data = {}
215 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
216 fd, tmpname = tempfile.mkstemp(prefix='hg-patch-')
216 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
217 tmpfp = os.fdopen(fd, pycompat.sysstr('w'))
217 try:
218 try:
218 msg = email.Parser.Parser().parse(fileobj)
219 msg = email.Parser.Parser().parse(fileobj)
219
220
220 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
221 subject = msg['Subject'] and mail.headdecode(msg['Subject'])
221 data['user'] = msg['From'] and mail.headdecode(msg['From'])
222 data['user'] = msg['From'] and mail.headdecode(msg['From'])
222 if not subject and not data['user']:
223 if not subject and not data['user']:
223 # Not an email, restore parsed headers if any
224 # Not an email, restore parsed headers if any
224 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
225 subject = '\n'.join(': '.join(h) for h in msg.items()) + '\n'
225
226
226 # should try to parse msg['Date']
227 # should try to parse msg['Date']
227 parents = []
228 parents = []
228
229
229 if subject:
230 if subject:
230 if subject.startswith('[PATCH'):
231 if subject.startswith('[PATCH'):
231 pend = subject.find(']')
232 pend = subject.find(']')
232 if pend >= 0:
233 if pend >= 0:
233 subject = subject[pend + 1:].lstrip()
234 subject = subject[pend + 1:].lstrip()
234 subject = re.sub(br'\n[ \t]+', ' ', subject)
235 subject = re.sub(br'\n[ \t]+', ' ', subject)
235 ui.debug('Subject: %s\n' % subject)
236 ui.debug('Subject: %s\n' % subject)
236 if data['user']:
237 if data['user']:
237 ui.debug('From: %s\n' % data['user'])
238 ui.debug('From: %s\n' % data['user'])
238 diffs_seen = 0
239 diffs_seen = 0
239 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
240 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
240 message = ''
241 message = ''
241 for part in msg.walk():
242 for part in msg.walk():
242 content_type = part.get_content_type()
243 content_type = part.get_content_type()
243 ui.debug('Content-Type: %s\n' % content_type)
244 ui.debug('Content-Type: %s\n' % content_type)
244 if content_type not in ok_types:
245 if content_type not in ok_types:
245 continue
246 continue
246 payload = part.get_payload(decode=True)
247 payload = part.get_payload(decode=True)
247 m = diffre.search(payload)
248 m = diffre.search(payload)
248 if m:
249 if m:
249 hgpatch = False
250 hgpatch = False
250 hgpatchheader = False
251 hgpatchheader = False
251 ignoretext = False
252 ignoretext = False
252
253
253 ui.debug('found patch at byte %d\n' % m.start(0))
254 ui.debug('found patch at byte %d\n' % m.start(0))
254 diffs_seen += 1
255 diffs_seen += 1
255 cfp = stringio()
256 cfp = stringio()
256 for line in payload[:m.start(0)].splitlines():
257 for line in payload[:m.start(0)].splitlines():
257 if line.startswith('# HG changeset patch') and not hgpatch:
258 if line.startswith('# HG changeset patch') and not hgpatch:
258 ui.debug('patch generated by hg export\n')
259 ui.debug('patch generated by hg export\n')
259 hgpatch = True
260 hgpatch = True
260 hgpatchheader = True
261 hgpatchheader = True
261 # drop earlier commit message content
262 # drop earlier commit message content
262 cfp.seek(0)
263 cfp.seek(0)
263 cfp.truncate()
264 cfp.truncate()
264 subject = None
265 subject = None
265 elif hgpatchheader:
266 elif hgpatchheader:
266 if line.startswith('# User '):
267 if line.startswith('# User '):
267 data['user'] = line[7:]
268 data['user'] = line[7:]
268 ui.debug('From: %s\n' % data['user'])
269 ui.debug('From: %s\n' % data['user'])
269 elif line.startswith("# Parent "):
270 elif line.startswith("# Parent "):
270 parents.append(line[9:].lstrip())
271 parents.append(line[9:].lstrip())
271 elif line.startswith("# "):
272 elif line.startswith("# "):
272 for header, key in patchheadermap:
273 for header, key in patchheadermap:
273 prefix = '# %s ' % header
274 prefix = '# %s ' % header
274 if line.startswith(prefix):
275 if line.startswith(prefix):
275 data[key] = line[len(prefix):]
276 data[key] = line[len(prefix):]
276 else:
277 else:
277 hgpatchheader = False
278 hgpatchheader = False
278 elif line == '---':
279 elif line == '---':
279 ignoretext = True
280 ignoretext = True
280 if not hgpatchheader and not ignoretext:
281 if not hgpatchheader and not ignoretext:
281 cfp.write(line)
282 cfp.write(line)
282 cfp.write('\n')
283 cfp.write('\n')
283 message = cfp.getvalue()
284 message = cfp.getvalue()
284 if tmpfp:
285 if tmpfp:
285 tmpfp.write(payload)
286 tmpfp.write(payload)
286 if not payload.endswith('\n'):
287 if not payload.endswith('\n'):
287 tmpfp.write('\n')
288 tmpfp.write('\n')
288 elif not diffs_seen and message and content_type == 'text/plain':
289 elif not diffs_seen and message and content_type == 'text/plain':
289 message += '\n' + payload
290 message += '\n' + payload
290 except: # re-raises
291 except: # re-raises
291 tmpfp.close()
292 tmpfp.close()
292 os.unlink(tmpname)
293 os.unlink(tmpname)
293 raise
294 raise
294
295
295 if subject and not message.startswith(subject):
296 if subject and not message.startswith(subject):
296 message = '%s\n%s' % (subject, message)
297 message = '%s\n%s' % (subject, message)
297 data['message'] = message
298 data['message'] = message
298 tmpfp.close()
299 tmpfp.close()
299 if parents:
300 if parents:
300 data['p1'] = parents.pop(0)
301 data['p1'] = parents.pop(0)
301 if parents:
302 if parents:
302 data['p2'] = parents.pop(0)
303 data['p2'] = parents.pop(0)
303
304
304 if diffs_seen:
305 if diffs_seen:
305 data['filename'] = tmpname
306 data['filename'] = tmpname
306 else:
307 else:
307 os.unlink(tmpname)
308 os.unlink(tmpname)
308 return data
309 return data
309
310
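# Illustrative sketch (not part of the original file): how a caller might
# consume the dict returned above. The enclosing helper is assumed to be
# patch.extract(), and 'ui'/'fileobj' are assumed to already exist; the
# caller is responsible for deleting the temporary patch file.
#
#     data = extract(ui, fileobj)
#     try:
#         if data.get('user'):
#             ui.write('user: %s\n' % data['user'])
#         if data.get('filename'):
#             ui.write('patch body written to %s\n' % data['filename'])
#     finally:
#         if data.get('filename'):
#             os.unlink(data['filename'])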
310 class patchmeta(object):
311 class patchmeta(object):
311 """Patched file metadata
312 """Patched file metadata
312
313
313 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
314 'op' is the performed operation, one of ADD, DELETE, RENAME, MODIFY
314 or COPY. 'path' is the patched file path. 'oldpath' is set to the
315 or COPY. 'path' is the patched file path. 'oldpath' is set to the
315 origin file when 'op' is either COPY or RENAME, None otherwise. If
316 origin file when 'op' is either COPY or RENAME, None otherwise. If
316 file mode is changed, 'mode' is a tuple (islink, isexec) where
317 file mode is changed, 'mode' is a tuple (islink, isexec) where
317 'islink' is True if the file is a symlink and 'isexec' is True if
318 'islink' is True if the file is a symlink and 'isexec' is True if
318 the file is executable. Otherwise, 'mode' is None.
319 the file is executable. Otherwise, 'mode' is None.
319 """
320 """
320 def __init__(self, path):
321 def __init__(self, path):
321 self.path = path
322 self.path = path
322 self.oldpath = None
323 self.oldpath = None
323 self.mode = None
324 self.mode = None
324 self.op = 'MODIFY'
325 self.op = 'MODIFY'
325 self.binary = False
326 self.binary = False
326
327
327 def setmode(self, mode):
328 def setmode(self, mode):
328 islink = mode & 0o20000
329 islink = mode & 0o20000
329 isexec = mode & 0o100
330 isexec = mode & 0o100
330 self.mode = (islink, isexec)
331 self.mode = (islink, isexec)
331
332
332 def copy(self):
333 def copy(self):
333 other = patchmeta(self.path)
334 other = patchmeta(self.path)
334 other.oldpath = self.oldpath
335 other.oldpath = self.oldpath
335 other.mode = self.mode
336 other.mode = self.mode
336 other.op = self.op
337 other.op = self.op
337 other.binary = self.binary
338 other.binary = self.binary
338 return other
339 return other
339
340
340 def _ispatchinga(self, afile):
341 def _ispatchinga(self, afile):
341 if afile == '/dev/null':
342 if afile == '/dev/null':
342 return self.op == 'ADD'
343 return self.op == 'ADD'
343 return afile == 'a/' + (self.oldpath or self.path)
344 return afile == 'a/' + (self.oldpath or self.path)
344
345
345 def _ispatchingb(self, bfile):
346 def _ispatchingb(self, bfile):
346 if bfile == '/dev/null':
347 if bfile == '/dev/null':
347 return self.op == 'DELETE'
348 return self.op == 'DELETE'
348 return bfile == 'b/' + self.path
349 return bfile == 'b/' + self.path
349
350
350 def ispatching(self, afile, bfile):
351 def ispatching(self, afile, bfile):
351 return self._ispatchinga(afile) and self._ispatchingb(bfile)
352 return self._ispatchinga(afile) and self._ispatchingb(bfile)
352
353
353 def __repr__(self):
354 def __repr__(self):
354 return "<patchmeta %s %r>" % (self.op, self.path)
355 return "<patchmeta %s %r>" % (self.op, self.path)
355
356
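# Illustrative sketch (not part of the original file): minimal use of
# patchmeta, assuming a rename of a.txt to b.txt as parsed from a git-style
# diff header.
#
#     >>> gp = patchmeta('b.txt')
#     >>> gp.op, gp.oldpath = 'RENAME', 'a.txt'
#     >>> gp.ispatching('a/a.txt', 'b/b.txt')
#     True
#     >>> gp.ispatching('a/other.txt', 'b/b.txt')
#     False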
356 def readgitpatch(lr):
357 def readgitpatch(lr):
357 """extract git-style metadata about patches from <patchname>"""
358 """extract git-style metadata about patches from <patchname>"""
358
359
359 # Filter patch for git information
360 # Filter patch for git information
360 gp = None
361 gp = None
361 gitpatches = []
362 gitpatches = []
362 for line in lr:
363 for line in lr:
363 line = line.rstrip(' \r\n')
364 line = line.rstrip(' \r\n')
364 if line.startswith('diff --git a/'):
365 if line.startswith('diff --git a/'):
365 m = gitre.match(line)
366 m = gitre.match(line)
366 if m:
367 if m:
367 if gp:
368 if gp:
368 gitpatches.append(gp)
369 gitpatches.append(gp)
369 dst = m.group(2)
370 dst = m.group(2)
370 gp = patchmeta(dst)
371 gp = patchmeta(dst)
371 elif gp:
372 elif gp:
372 if line.startswith('--- '):
373 if line.startswith('--- '):
373 gitpatches.append(gp)
374 gitpatches.append(gp)
374 gp = None
375 gp = None
375 continue
376 continue
376 if line.startswith('rename from '):
377 if line.startswith('rename from '):
377 gp.op = 'RENAME'
378 gp.op = 'RENAME'
378 gp.oldpath = line[12:]
379 gp.oldpath = line[12:]
379 elif line.startswith('rename to '):
380 elif line.startswith('rename to '):
380 gp.path = line[10:]
381 gp.path = line[10:]
381 elif line.startswith('copy from '):
382 elif line.startswith('copy from '):
382 gp.op = 'COPY'
383 gp.op = 'COPY'
383 gp.oldpath = line[10:]
384 gp.oldpath = line[10:]
384 elif line.startswith('copy to '):
385 elif line.startswith('copy to '):
385 gp.path = line[8:]
386 gp.path = line[8:]
386 elif line.startswith('deleted file'):
387 elif line.startswith('deleted file'):
387 gp.op = 'DELETE'
388 gp.op = 'DELETE'
388 elif line.startswith('new file mode '):
389 elif line.startswith('new file mode '):
389 gp.op = 'ADD'
390 gp.op = 'ADD'
390 gp.setmode(int(line[-6:], 8))
391 gp.setmode(int(line[-6:], 8))
391 elif line.startswith('new mode '):
392 elif line.startswith('new mode '):
392 gp.setmode(int(line[-6:], 8))
393 gp.setmode(int(line[-6:], 8))
393 elif line.startswith('GIT binary patch'):
394 elif line.startswith('GIT binary patch'):
394 gp.binary = True
395 gp.binary = True
395 if gp:
396 if gp:
396 gitpatches.append(gp)
397 gitpatches.append(gp)
397
398
398 return gitpatches
399 return gitpatches
399
400
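# Illustrative sketch (not part of the original file): readgitpatch() accepts
# any iterable of lines; 'gitre' is the module-level regexp defined elsewhere
# in this file.
#
#     >>> lines = ['diff --git a/a.txt b/b.txt\n',
#     ...          'rename from a.txt\n',
#     ...          'rename to b.txt\n']
#     >>> [(g.op, g.oldpath, g.path) for g in readgitpatch(lines)]
#     [('RENAME', 'a.txt', 'b.txt')]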
400 class linereader(object):
401 class linereader(object):
401 # simple class to allow pushing lines back into the input stream
402 # simple class to allow pushing lines back into the input stream
402 def __init__(self, fp):
403 def __init__(self, fp):
403 self.fp = fp
404 self.fp = fp
404 self.buf = []
405 self.buf = []
405
406
406 def push(self, line):
407 def push(self, line):
407 if line is not None:
408 if line is not None:
408 self.buf.append(line)
409 self.buf.append(line)
409
410
410 def readline(self):
411 def readline(self):
411 if self.buf:
412 if self.buf:
412 l = self.buf[0]
413 l = self.buf[0]
413 del self.buf[0]
414 del self.buf[0]
414 return l
415 return l
415 return self.fp.readline()
416 return self.fp.readline()
416
417
417 def __iter__(self):
418 def __iter__(self):
418 return iter(self.readline, '')
419 return iter(self.readline, '')
419
420
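# Illustrative sketch (not part of the original file): push() lets a parser
# peek at a line and put it back, using the module-level stringio.
#
#     >>> lr = linereader(stringio('one\ntwo\n'))
#     >>> first = lr.readline()
#     >>> lr.push(first)            # put the line back
#     >>> list(lr)
#     ['one\n', 'two\n']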
420 class abstractbackend(object):
421 class abstractbackend(object):
421 def __init__(self, ui):
422 def __init__(self, ui):
422 self.ui = ui
423 self.ui = ui
423
424
424 def getfile(self, fname):
425 def getfile(self, fname):
425 """Return target file data and flags as a (data, (islink,
426 """Return target file data and flags as a (data, (islink,
426 isexec)) tuple. Data is None if file is missing/deleted.
427 isexec)) tuple. Data is None if file is missing/deleted.
427 """
428 """
428 raise NotImplementedError
429 raise NotImplementedError
429
430
430 def setfile(self, fname, data, mode, copysource):
431 def setfile(self, fname, data, mode, copysource):
431 """Write data to target file fname and set its mode. mode is a
432 """Write data to target file fname and set its mode. mode is a
432 (islink, isexec) tuple. If data is None, the file content should
433 (islink, isexec) tuple. If data is None, the file content should
433 be left unchanged. If the file is modified after being copied,
434 be left unchanged. If the file is modified after being copied,
434 copysource is set to the original file name.
435 copysource is set to the original file name.
435 """
436 """
436 raise NotImplementedError
437 raise NotImplementedError
437
438
438 def unlink(self, fname):
439 def unlink(self, fname):
439 """Unlink target file."""
440 """Unlink target file."""
440 raise NotImplementedError
441 raise NotImplementedError
441
442
442 def writerej(self, fname, failed, total, lines):
443 def writerej(self, fname, failed, total, lines):
443 """Write rejected lines for fname. total is the number of hunks
444 """Write rejected lines for fname. total is the number of hunks
444 which failed to apply and total the total number of hunks for this
445 which failed to apply and total the total number of hunks for this
445 files.
446 files.
446 """
447 """
447
448
448 def exists(self, fname):
449 def exists(self, fname):
449 raise NotImplementedError
450 raise NotImplementedError
450
451
451 def close(self):
452 def close(self):
452 raise NotImplementedError
453 raise NotImplementedError
453
454
454 class fsbackend(abstractbackend):
455 class fsbackend(abstractbackend):
455 def __init__(self, ui, basedir):
456 def __init__(self, ui, basedir):
456 super(fsbackend, self).__init__(ui)
457 super(fsbackend, self).__init__(ui)
457 self.opener = vfsmod.vfs(basedir)
458 self.opener = vfsmod.vfs(basedir)
458
459
459 def getfile(self, fname):
460 def getfile(self, fname):
460 if self.opener.islink(fname):
461 if self.opener.islink(fname):
461 return (self.opener.readlink(fname), (True, False))
462 return (self.opener.readlink(fname), (True, False))
462
463
463 isexec = False
464 isexec = False
464 try:
465 try:
465 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
466 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
466 except OSError as e:
467 except OSError as e:
467 if e.errno != errno.ENOENT:
468 if e.errno != errno.ENOENT:
468 raise
469 raise
469 try:
470 try:
470 return (self.opener.read(fname), (False, isexec))
471 return (self.opener.read(fname), (False, isexec))
471 except IOError as e:
472 except IOError as e:
472 if e.errno != errno.ENOENT:
473 if e.errno != errno.ENOENT:
473 raise
474 raise
474 return None, None
475 return None, None
475
476
476 def setfile(self, fname, data, mode, copysource):
477 def setfile(self, fname, data, mode, copysource):
477 islink, isexec = mode
478 islink, isexec = mode
478 if data is None:
479 if data is None:
479 self.opener.setflags(fname, islink, isexec)
480 self.opener.setflags(fname, islink, isexec)
480 return
481 return
481 if islink:
482 if islink:
482 self.opener.symlink(data, fname)
483 self.opener.symlink(data, fname)
483 else:
484 else:
484 self.opener.write(fname, data)
485 self.opener.write(fname, data)
485 if isexec:
486 if isexec:
486 self.opener.setflags(fname, False, True)
487 self.opener.setflags(fname, False, True)
487
488
488 def unlink(self, fname):
489 def unlink(self, fname):
489 self.opener.unlinkpath(fname, ignoremissing=True)
490 self.opener.unlinkpath(fname, ignoremissing=True)
490
491
491 def writerej(self, fname, failed, total, lines):
492 def writerej(self, fname, failed, total, lines):
492 fname = fname + ".rej"
493 fname = fname + ".rej"
493 self.ui.warn(
494 self.ui.warn(
494 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
495 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
495 (failed, total, fname))
496 (failed, total, fname))
496 fp = self.opener(fname, 'w')
497 fp = self.opener(fname, 'w')
497 fp.writelines(lines)
498 fp.writelines(lines)
498 fp.close()
499 fp.close()
499
500
500 def exists(self, fname):
501 def exists(self, fname):
501 return self.opener.lexists(fname)
502 return self.opener.lexists(fname)
502
503
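# Illustrative sketch (not part of the original file): fsbackend reads and
# writes files relative to a base directory; 'ui' is assumed to be an
# existing ui object and the temporary directory is for illustration only.
#
#     >>> backend = fsbackend(ui, tempfile.mkdtemp())
#     >>> backend.setfile('f.txt', 'data\n', (False, False), None)
#     >>> backend.getfile('f.txt')
#     ('data\n', (False, False))
#     >>> backend.exists('f.txt')
#     True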
503 class workingbackend(fsbackend):
504 class workingbackend(fsbackend):
504 def __init__(self, ui, repo, similarity):
505 def __init__(self, ui, repo, similarity):
505 super(workingbackend, self).__init__(ui, repo.root)
506 super(workingbackend, self).__init__(ui, repo.root)
506 self.repo = repo
507 self.repo = repo
507 self.similarity = similarity
508 self.similarity = similarity
508 self.removed = set()
509 self.removed = set()
509 self.changed = set()
510 self.changed = set()
510 self.copied = []
511 self.copied = []
511
512
512 def _checkknown(self, fname):
513 def _checkknown(self, fname):
513 if self.repo.dirstate[fname] == '?' and self.exists(fname):
514 if self.repo.dirstate[fname] == '?' and self.exists(fname):
514 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
515 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
515
516
516 def setfile(self, fname, data, mode, copysource):
517 def setfile(self, fname, data, mode, copysource):
517 self._checkknown(fname)
518 self._checkknown(fname)
518 super(workingbackend, self).setfile(fname, data, mode, copysource)
519 super(workingbackend, self).setfile(fname, data, mode, copysource)
519 if copysource is not None:
520 if copysource is not None:
520 self.copied.append((copysource, fname))
521 self.copied.append((copysource, fname))
521 self.changed.add(fname)
522 self.changed.add(fname)
522
523
523 def unlink(self, fname):
524 def unlink(self, fname):
524 self._checkknown(fname)
525 self._checkknown(fname)
525 super(workingbackend, self).unlink(fname)
526 super(workingbackend, self).unlink(fname)
526 self.removed.add(fname)
527 self.removed.add(fname)
527 self.changed.add(fname)
528 self.changed.add(fname)
528
529
529 def close(self):
530 def close(self):
530 wctx = self.repo[None]
531 wctx = self.repo[None]
531 changed = set(self.changed)
532 changed = set(self.changed)
532 for src, dst in self.copied:
533 for src, dst in self.copied:
533 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
534 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
534 if self.removed:
535 if self.removed:
535 wctx.forget(sorted(self.removed))
536 wctx.forget(sorted(self.removed))
536 for f in self.removed:
537 for f in self.removed:
537 if f not in self.repo.dirstate:
538 if f not in self.repo.dirstate:
538 # File was deleted and no longer belongs to the
539 # File was deleted and no longer belongs to the
539 # dirstate, it was probably marked added then
540 # dirstate, it was probably marked added then
540 # deleted, and should not be considered by
541 # deleted, and should not be considered by
541 # marktouched().
542 # marktouched().
542 changed.discard(f)
543 changed.discard(f)
543 if changed:
544 if changed:
544 scmutil.marktouched(self.repo, changed, self.similarity)
545 scmutil.marktouched(self.repo, changed, self.similarity)
545 return sorted(self.changed)
546 return sorted(self.changed)
546
547
547 class filestore(object):
548 class filestore(object):
548 def __init__(self, maxsize=None):
549 def __init__(self, maxsize=None):
549 self.opener = None
550 self.opener = None
550 self.files = {}
551 self.files = {}
551 self.created = 0
552 self.created = 0
552 self.maxsize = maxsize
553 self.maxsize = maxsize
553 if self.maxsize is None:
554 if self.maxsize is None:
554 self.maxsize = 4*(2**20)
555 self.maxsize = 4*(2**20)
555 self.size = 0
556 self.size = 0
556 self.data = {}
557 self.data = {}
557
558
558 def setfile(self, fname, data, mode, copied=None):
559 def setfile(self, fname, data, mode, copied=None):
559 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
560 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
560 self.data[fname] = (data, mode, copied)
561 self.data[fname] = (data, mode, copied)
561 self.size += len(data)
562 self.size += len(data)
562 else:
563 else:
563 if self.opener is None:
564 if self.opener is None:
564 root = tempfile.mkdtemp(prefix='hg-patch-')
565 root = tempfile.mkdtemp(prefix='hg-patch-')
565 self.opener = vfsmod.vfs(root)
566 self.opener = vfsmod.vfs(root)
566 # Avoid filename issues with these simple names
567 # Avoid filename issues with these simple names
567 fn = str(self.created)
568 fn = str(self.created)
568 self.opener.write(fn, data)
569 self.opener.write(fn, data)
569 self.created += 1
570 self.created += 1
570 self.files[fname] = (fn, mode, copied)
571 self.files[fname] = (fn, mode, copied)
571
572
572 def getfile(self, fname):
573 def getfile(self, fname):
573 if fname in self.data:
574 if fname in self.data:
574 return self.data[fname]
575 return self.data[fname]
575 if not self.opener or fname not in self.files:
576 if not self.opener or fname not in self.files:
576 return None, None, None
577 return None, None, None
577 fn, mode, copied = self.files[fname]
578 fn, mode, copied = self.files[fname]
578 return self.opener.read(fn), mode, copied
579 return self.opener.read(fn), mode, copied
579
580
580 def close(self):
581 def close(self):
581 if self.opener:
582 if self.opener:
582 shutil.rmtree(self.opener.base)
583 shutil.rmtree(self.opener.base)
583
584
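# Illustrative sketch (not part of the original file): with maxsize=0 every
# entry spills to a temporary directory instead of staying in memory.
#
#     >>> store = filestore(maxsize=0)
#     >>> store.setfile('f.txt', 'data\n', (False, False))
#     >>> store.getfile('f.txt')
#     ('data\n', (False, False), None)
#     >>> store.close()             # removes the temporary directory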
584 class repobackend(abstractbackend):
585 class repobackend(abstractbackend):
585 def __init__(self, ui, repo, ctx, store):
586 def __init__(self, ui, repo, ctx, store):
586 super(repobackend, self).__init__(ui)
587 super(repobackend, self).__init__(ui)
587 self.repo = repo
588 self.repo = repo
588 self.ctx = ctx
589 self.ctx = ctx
589 self.store = store
590 self.store = store
590 self.changed = set()
591 self.changed = set()
591 self.removed = set()
592 self.removed = set()
592 self.copied = {}
593 self.copied = {}
593
594
594 def _checkknown(self, fname):
595 def _checkknown(self, fname):
595 if fname not in self.ctx:
596 if fname not in self.ctx:
596 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
597 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
597
598
598 def getfile(self, fname):
599 def getfile(self, fname):
599 try:
600 try:
600 fctx = self.ctx[fname]
601 fctx = self.ctx[fname]
601 except error.LookupError:
602 except error.LookupError:
602 return None, None
603 return None, None
603 flags = fctx.flags()
604 flags = fctx.flags()
604 return fctx.data(), ('l' in flags, 'x' in flags)
605 return fctx.data(), ('l' in flags, 'x' in flags)
605
606
606 def setfile(self, fname, data, mode, copysource):
607 def setfile(self, fname, data, mode, copysource):
607 if copysource:
608 if copysource:
608 self._checkknown(copysource)
609 self._checkknown(copysource)
609 if data is None:
610 if data is None:
610 data = self.ctx[fname].data()
611 data = self.ctx[fname].data()
611 self.store.setfile(fname, data, mode, copysource)
612 self.store.setfile(fname, data, mode, copysource)
612 self.changed.add(fname)
613 self.changed.add(fname)
613 if copysource:
614 if copysource:
614 self.copied[fname] = copysource
615 self.copied[fname] = copysource
615
616
616 def unlink(self, fname):
617 def unlink(self, fname):
617 self._checkknown(fname)
618 self._checkknown(fname)
618 self.removed.add(fname)
619 self.removed.add(fname)
619
620
620 def exists(self, fname):
621 def exists(self, fname):
621 return fname in self.ctx
622 return fname in self.ctx
622
623
623 def close(self):
624 def close(self):
624 return self.changed | self.removed
625 return self.changed | self.removed
625
626
626 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
627 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
627 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
628 unidesc = re.compile('@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
628 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
629 contextdesc = re.compile('(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
629 eolmodes = ['strict', 'crlf', 'lf', 'auto']
630 eolmodes = ['strict', 'crlf', 'lf', 'auto']
630
631
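# Illustrative sketch (not part of the original file): what the two hunk
# header regexps above capture.
#
#     >>> unidesc.match('@@ -1,5 +1,6 @@').groups()
#     ('1', '5', '1', '6')
#     >>> contextdesc.match('*** 1,5 ****').groups()
#     ('1', '5')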
631 class patchfile(object):
632 class patchfile(object):
632 def __init__(self, ui, gp, backend, store, eolmode='strict'):
633 def __init__(self, ui, gp, backend, store, eolmode='strict'):
633 self.fname = gp.path
634 self.fname = gp.path
634 self.eolmode = eolmode
635 self.eolmode = eolmode
635 self.eol = None
636 self.eol = None
636 self.backend = backend
637 self.backend = backend
637 self.ui = ui
638 self.ui = ui
638 self.lines = []
639 self.lines = []
639 self.exists = False
640 self.exists = False
640 self.missing = True
641 self.missing = True
641 self.mode = gp.mode
642 self.mode = gp.mode
642 self.copysource = gp.oldpath
643 self.copysource = gp.oldpath
643 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
644 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
644 self.remove = gp.op == 'DELETE'
645 self.remove = gp.op == 'DELETE'
645 if self.copysource is None:
646 if self.copysource is None:
646 data, mode = backend.getfile(self.fname)
647 data, mode = backend.getfile(self.fname)
647 else:
648 else:
648 data, mode = store.getfile(self.copysource)[:2]
649 data, mode = store.getfile(self.copysource)[:2]
649 if data is not None:
650 if data is not None:
650 self.exists = self.copysource is None or backend.exists(self.fname)
651 self.exists = self.copysource is None or backend.exists(self.fname)
651 self.missing = False
652 self.missing = False
652 if data:
653 if data:
653 self.lines = mdiff.splitnewlines(data)
654 self.lines = mdiff.splitnewlines(data)
654 if self.mode is None:
655 if self.mode is None:
655 self.mode = mode
656 self.mode = mode
656 if self.lines:
657 if self.lines:
657 # Normalize line endings
658 # Normalize line endings
658 if self.lines[0].endswith('\r\n'):
659 if self.lines[0].endswith('\r\n'):
659 self.eol = '\r\n'
660 self.eol = '\r\n'
660 elif self.lines[0].endswith('\n'):
661 elif self.lines[0].endswith('\n'):
661 self.eol = '\n'
662 self.eol = '\n'
662 if eolmode != 'strict':
663 if eolmode != 'strict':
663 nlines = []
664 nlines = []
664 for l in self.lines:
665 for l in self.lines:
665 if l.endswith('\r\n'):
666 if l.endswith('\r\n'):
666 l = l[:-2] + '\n'
667 l = l[:-2] + '\n'
667 nlines.append(l)
668 nlines.append(l)
668 self.lines = nlines
669 self.lines = nlines
669 else:
670 else:
670 if self.create:
671 if self.create:
671 self.missing = False
672 self.missing = False
672 if self.mode is None:
673 if self.mode is None:
673 self.mode = (False, False)
674 self.mode = (False, False)
674 if self.missing:
675 if self.missing:
675 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
676 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
676 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
677 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
677 "current directory)\n"))
678 "current directory)\n"))
678
679
679 self.hash = {}
680 self.hash = {}
680 self.dirty = 0
681 self.dirty = 0
681 self.offset = 0
682 self.offset = 0
682 self.skew = 0
683 self.skew = 0
683 self.rej = []
684 self.rej = []
684 self.fileprinted = False
685 self.fileprinted = False
685 self.printfile(False)
686 self.printfile(False)
686 self.hunks = 0
687 self.hunks = 0
687
688
688 def writelines(self, fname, lines, mode):
689 def writelines(self, fname, lines, mode):
689 if self.eolmode == 'auto':
690 if self.eolmode == 'auto':
690 eol = self.eol
691 eol = self.eol
691 elif self.eolmode == 'crlf':
692 elif self.eolmode == 'crlf':
692 eol = '\r\n'
693 eol = '\r\n'
693 else:
694 else:
694 eol = '\n'
695 eol = '\n'
695
696
696 if self.eolmode != 'strict' and eol and eol != '\n':
697 if self.eolmode != 'strict' and eol and eol != '\n':
697 rawlines = []
698 rawlines = []
698 for l in lines:
699 for l in lines:
699 if l and l[-1] == '\n':
700 if l and l[-1] == '\n':
700 l = l[:-1] + eol
701 l = l[:-1] + eol
701 rawlines.append(l)
702 rawlines.append(l)
702 lines = rawlines
703 lines = rawlines
703
704
704 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
705 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
705
706
706 def printfile(self, warn):
707 def printfile(self, warn):
707 if self.fileprinted:
708 if self.fileprinted:
708 return
709 return
709 if warn or self.ui.verbose:
710 if warn or self.ui.verbose:
710 self.fileprinted = True
711 self.fileprinted = True
711 s = _("patching file %s\n") % self.fname
712 s = _("patching file %s\n") % self.fname
712 if warn:
713 if warn:
713 self.ui.warn(s)
714 self.ui.warn(s)
714 else:
715 else:
715 self.ui.note(s)
716 self.ui.note(s)
716
717
717
718
718 def findlines(self, l, linenum):
719 def findlines(self, l, linenum):
719 # looks through the hash and finds candidate lines. The
720 # looks through the hash and finds candidate lines. The
720 # result is a list of line numbers sorted based on distance
721 # result is a list of line numbers sorted based on distance
721 # from linenum
722 # from linenum
722
723
723 cand = self.hash.get(l, [])
724 cand = self.hash.get(l, [])
724 if len(cand) > 1:
725 if len(cand) > 1:
725 # re-sort our list of potential matches by distance from linenum.
726 # re-sort our list of potential matches by distance from linenum.
726 cand.sort(key=lambda x: abs(x - linenum))
727 cand.sort(key=lambda x: abs(x - linenum))
727 return cand
728 return cand
728
729
729 def write_rej(self):
730 def write_rej(self):
730 # our rejects are a little different from patch(1). This always
731 # our rejects are a little different from patch(1). This always
731 # creates rejects in the same form as the original patch. A file
732 # creates rejects in the same form as the original patch. A file
732 # header is inserted so that you can run the reject through patch again
733 # header is inserted so that you can run the reject through patch again
733 # without having to type the filename.
734 # without having to type the filename.
734 if not self.rej:
735 if not self.rej:
735 return
736 return
736 base = os.path.basename(self.fname)
737 base = os.path.basename(self.fname)
737 lines = ["--- %s\n+++ %s\n" % (base, base)]
738 lines = ["--- %s\n+++ %s\n" % (base, base)]
738 for x in self.rej:
739 for x in self.rej:
739 for l in x.hunk:
740 for l in x.hunk:
740 lines.append(l)
741 lines.append(l)
741 if l[-1:] != '\n':
742 if l[-1:] != '\n':
742 lines.append("\n\ No newline at end of file\n")
743 lines.append("\n\ No newline at end of file\n")
743 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
744 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
744
745
745 def apply(self, h):
746 def apply(self, h):
746 if not h.complete():
747 if not h.complete():
747 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
748 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
748 (h.number, h.desc, len(h.a), h.lena, len(h.b),
749 (h.number, h.desc, len(h.a), h.lena, len(h.b),
749 h.lenb))
750 h.lenb))
750
751
751 self.hunks += 1
752 self.hunks += 1
752
753
753 if self.missing:
754 if self.missing:
754 self.rej.append(h)
755 self.rej.append(h)
755 return -1
756 return -1
756
757
757 if self.exists and self.create:
758 if self.exists and self.create:
758 if self.copysource:
759 if self.copysource:
759 self.ui.warn(_("cannot create %s: destination already "
760 self.ui.warn(_("cannot create %s: destination already "
760 "exists\n") % self.fname)
761 "exists\n") % self.fname)
761 else:
762 else:
762 self.ui.warn(_("file %s already exists\n") % self.fname)
763 self.ui.warn(_("file %s already exists\n") % self.fname)
763 self.rej.append(h)
764 self.rej.append(h)
764 return -1
765 return -1
765
766
766 if isinstance(h, binhunk):
767 if isinstance(h, binhunk):
767 if self.remove:
768 if self.remove:
768 self.backend.unlink(self.fname)
769 self.backend.unlink(self.fname)
769 else:
770 else:
770 l = h.new(self.lines)
771 l = h.new(self.lines)
771 self.lines[:] = l
772 self.lines[:] = l
772 self.offset += len(l)
773 self.offset += len(l)
773 self.dirty = True
774 self.dirty = True
774 return 0
775 return 0
775
776
776 horig = h
777 horig = h
777 if (self.eolmode in ('crlf', 'lf')
778 if (self.eolmode in ('crlf', 'lf')
778 or self.eolmode == 'auto' and self.eol):
779 or self.eolmode == 'auto' and self.eol):
779 # If new eols are going to be normalized, then normalize
780 # If new eols are going to be normalized, then normalize
780 # hunk data before patching. Otherwise, preserve input
781 # hunk data before patching. Otherwise, preserve input
781 # line-endings.
782 # line-endings.
782 h = h.getnormalized()
783 h = h.getnormalized()
783
784
784 # fast case first, no offsets, no fuzz
785 # fast case first, no offsets, no fuzz
785 old, oldstart, new, newstart = h.fuzzit(0, False)
786 old, oldstart, new, newstart = h.fuzzit(0, False)
786 oldstart += self.offset
787 oldstart += self.offset
787 orig_start = oldstart
788 orig_start = oldstart
788 # if there's skew we want to emit the "(offset %d lines)" even
789 # if there's skew we want to emit the "(offset %d lines)" even
789 # when the hunk cleanly applies at start + skew, so skip the
790 # when the hunk cleanly applies at start + skew, so skip the
790 # fast case code
791 # fast case code
791 if (self.skew == 0 and
792 if (self.skew == 0 and
792 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
793 diffhelpers.testhunk(old, self.lines, oldstart) == 0):
793 if self.remove:
794 if self.remove:
794 self.backend.unlink(self.fname)
795 self.backend.unlink(self.fname)
795 else:
796 else:
796 self.lines[oldstart:oldstart + len(old)] = new
797 self.lines[oldstart:oldstart + len(old)] = new
797 self.offset += len(new) - len(old)
798 self.offset += len(new) - len(old)
798 self.dirty = True
799 self.dirty = True
799 return 0
800 return 0
800
801
801 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
802 # ok, we couldn't match the hunk. Let's look for offsets and fuzz it
802 self.hash = {}
803 self.hash = {}
803 for x, s in enumerate(self.lines):
804 for x, s in enumerate(self.lines):
804 self.hash.setdefault(s, []).append(x)
805 self.hash.setdefault(s, []).append(x)
805
806
806 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
807 for fuzzlen in xrange(self.ui.configint("patch", "fuzz") + 1):
807 for toponly in [True, False]:
808 for toponly in [True, False]:
808 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
809 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
809 oldstart = oldstart + self.offset + self.skew
810 oldstart = oldstart + self.offset + self.skew
810 oldstart = min(oldstart, len(self.lines))
811 oldstart = min(oldstart, len(self.lines))
811 if old:
812 if old:
812 cand = self.findlines(old[0][1:], oldstart)
813 cand = self.findlines(old[0][1:], oldstart)
813 else:
814 else:
814 # Only adding lines with no or fuzzed context, just
815 # Only adding lines with no or fuzzed context, just
815 # take the skew into account
816 # take the skew into account
816 cand = [oldstart]
817 cand = [oldstart]
817
818
818 for l in cand:
819 for l in cand:
819 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
820 if not old or diffhelpers.testhunk(old, self.lines, l) == 0:
820 self.lines[l : l + len(old)] = new
821 self.lines[l : l + len(old)] = new
821 self.offset += len(new) - len(old)
822 self.offset += len(new) - len(old)
822 self.skew = l - orig_start
823 self.skew = l - orig_start
823 self.dirty = True
824 self.dirty = True
824 offset = l - orig_start - fuzzlen
825 offset = l - orig_start - fuzzlen
825 if fuzzlen:
826 if fuzzlen:
826 msg = _("Hunk #%d succeeded at %d "
827 msg = _("Hunk #%d succeeded at %d "
827 "with fuzz %d "
828 "with fuzz %d "
828 "(offset %d lines).\n")
829 "(offset %d lines).\n")
829 self.printfile(True)
830 self.printfile(True)
830 self.ui.warn(msg %
831 self.ui.warn(msg %
831 (h.number, l + 1, fuzzlen, offset))
832 (h.number, l + 1, fuzzlen, offset))
832 else:
833 else:
833 msg = _("Hunk #%d succeeded at %d "
834 msg = _("Hunk #%d succeeded at %d "
834 "(offset %d lines).\n")
835 "(offset %d lines).\n")
835 self.ui.note(msg % (h.number, l + 1, offset))
836 self.ui.note(msg % (h.number, l + 1, offset))
836 return fuzzlen
837 return fuzzlen
837 self.printfile(True)
838 self.printfile(True)
838 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
839 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
839 self.rej.append(horig)
840 self.rej.append(horig)
840 return -1
841 return -1
841
842
842 def close(self):
843 def close(self):
843 if self.dirty:
844 if self.dirty:
844 self.writelines(self.fname, self.lines, self.mode)
845 self.writelines(self.fname, self.lines, self.mode)
845 self.write_rej()
846 self.write_rej()
846 return len(self.rej)
847 return len(self.rej)
847
848
848 class header(object):
849 class header(object):
849 """patch header
850 """patch header
850 """
851 """
851 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
852 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
852 diff_re = re.compile('diff -r .* (.*)$')
853 diff_re = re.compile('diff -r .* (.*)$')
853 allhunks_re = re.compile('(?:index|deleted file) ')
854 allhunks_re = re.compile('(?:index|deleted file) ')
854 pretty_re = re.compile('(?:new file|deleted file) ')
855 pretty_re = re.compile('(?:new file|deleted file) ')
855 special_re = re.compile('(?:index|deleted|copy|rename) ')
856 special_re = re.compile('(?:index|deleted|copy|rename) ')
856 newfile_re = re.compile('(?:new file)')
857 newfile_re = re.compile('(?:new file)')
857
858
858 def __init__(self, header):
859 def __init__(self, header):
859 self.header = header
860 self.header = header
860 self.hunks = []
861 self.hunks = []
861
862
862 def binary(self):
863 def binary(self):
863 return any(h.startswith('index ') for h in self.header)
864 return any(h.startswith('index ') for h in self.header)
864
865
865 def pretty(self, fp):
866 def pretty(self, fp):
866 for h in self.header:
867 for h in self.header:
867 if h.startswith('index '):
868 if h.startswith('index '):
868 fp.write(_('this modifies a binary file (all or nothing)\n'))
869 fp.write(_('this modifies a binary file (all or nothing)\n'))
869 break
870 break
870 if self.pretty_re.match(h):
871 if self.pretty_re.match(h):
871 fp.write(h)
872 fp.write(h)
872 if self.binary():
873 if self.binary():
873 fp.write(_('this is a binary file\n'))
874 fp.write(_('this is a binary file\n'))
874 break
875 break
875 if h.startswith('---'):
876 if h.startswith('---'):
876 fp.write(_('%d hunks, %d lines changed\n') %
877 fp.write(_('%d hunks, %d lines changed\n') %
877 (len(self.hunks),
878 (len(self.hunks),
878 sum([max(h.added, h.removed) for h in self.hunks])))
879 sum([max(h.added, h.removed) for h in self.hunks])))
879 break
880 break
880 fp.write(h)
881 fp.write(h)
881
882
882 def write(self, fp):
883 def write(self, fp):
883 fp.write(''.join(self.header))
884 fp.write(''.join(self.header))
884
885
885 def allhunks(self):
886 def allhunks(self):
886 return any(self.allhunks_re.match(h) for h in self.header)
887 return any(self.allhunks_re.match(h) for h in self.header)
887
888
888 def files(self):
889 def files(self):
889 match = self.diffgit_re.match(self.header[0])
890 match = self.diffgit_re.match(self.header[0])
890 if match:
891 if match:
891 fromfile, tofile = match.groups()
892 fromfile, tofile = match.groups()
892 if fromfile == tofile:
893 if fromfile == tofile:
893 return [fromfile]
894 return [fromfile]
894 return [fromfile, tofile]
895 return [fromfile, tofile]
895 else:
896 else:
896 return self.diff_re.match(self.header[0]).groups()
897 return self.diff_re.match(self.header[0]).groups()
897
898
898 def filename(self):
899 def filename(self):
899 return self.files()[-1]
900 return self.files()[-1]
900
901
901 def __repr__(self):
902 def __repr__(self):
902 return '<header %s>' % (' '.join(map(repr, self.files())))
903 return '<header %s>' % (' '.join(map(repr, self.files())))
903
904
904 def isnewfile(self):
905 def isnewfile(self):
905 return any(self.newfile_re.match(h) for h in self.header)
906 return any(self.newfile_re.match(h) for h in self.header)
906
907
907 def special(self):
908 def special(self):
908 # Special files are shown only at the header level and not at the hunk
909 # Special files are shown only at the header level and not at the hunk
909 # level; for example, a file that has been deleted is a special file.
910 # level; for example, a file that has been deleted is a special file.
910 # The user cannot change the content of the operation: in the case of
911 # The user cannot change the content of the operation: in the case of
911 # a deleted file, the user has to take the deletion or not take it,
912 # a deleted file, the user has to take the deletion or not take it,
912 # but cannot take only part of it.
913 # but cannot take only part of it.
913 # Newly added files are special if they are empty; they are not special
914 # Newly added files are special if they are empty; they are not special
914 # if they have some content, as we want to be able to change it.
915 # if they have some content, as we want to be able to change it.
915 nocontent = len(self.header) == 2
916 nocontent = len(self.header) == 2
916 emptynewfile = self.isnewfile() and nocontent
917 emptynewfile = self.isnewfile() and nocontent
917 return emptynewfile or \
918 return emptynewfile or \
918 any(self.special_re.match(h) for h in self.header)
919 any(self.special_re.match(h) for h in self.header)
919
920
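# Illustrative sketch (not part of the original file): a header built from a
# git-style diff preamble.
#
#     >>> h = header(['diff --git a/a.txt b/b.txt\n', 'new file mode 100644\n'])
#     >>> h.files()
#     ['a.txt', 'b.txt']
#     >>> h.isnewfile()
#     True
#     >>> h.allhunks()
#     False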
920 class recordhunk(object):
921 class recordhunk(object):
921 """patch hunk
922 """patch hunk
922
923
923 XXX shouldn't we merge this with the other hunk class?
924 XXX shouldn't we merge this with the other hunk class?
924 """
925 """
925
926
926 def __init__(self, header, fromline, toline, proc, before, hunk, after,
927 def __init__(self, header, fromline, toline, proc, before, hunk, after,
927 maxcontext=None):
928 maxcontext=None):
928 def trimcontext(lines, reverse=False):
929 def trimcontext(lines, reverse=False):
929 if maxcontext is not None:
930 if maxcontext is not None:
930 delta = len(lines) - maxcontext
931 delta = len(lines) - maxcontext
931 if delta > 0:
932 if delta > 0:
932 if reverse:
933 if reverse:
933 return delta, lines[delta:]
934 return delta, lines[delta:]
934 else:
935 else:
935 return delta, lines[:maxcontext]
936 return delta, lines[:maxcontext]
936 return 0, lines
937 return 0, lines
937
938
938 self.header = header
939 self.header = header
939 trimedbefore, self.before = trimcontext(before, True)
940 trimedbefore, self.before = trimcontext(before, True)
940 self.fromline = fromline + trimedbefore
941 self.fromline = fromline + trimedbefore
941 self.toline = toline + trimedbefore
942 self.toline = toline + trimedbefore
942 _trimedafter, self.after = trimcontext(after, False)
943 _trimedafter, self.after = trimcontext(after, False)
943 self.proc = proc
944 self.proc = proc
944 self.hunk = hunk
945 self.hunk = hunk
945 self.added, self.removed = self.countchanges(self.hunk)
946 self.added, self.removed = self.countchanges(self.hunk)
946
947
947 def __eq__(self, v):
948 def __eq__(self, v):
948 if not isinstance(v, recordhunk):
949 if not isinstance(v, recordhunk):
949 return False
950 return False
950
951
951 return ((v.hunk == self.hunk) and
952 return ((v.hunk == self.hunk) and
952 (v.proc == self.proc) and
953 (v.proc == self.proc) and
953 (self.fromline == v.fromline) and
954 (self.fromline == v.fromline) and
954 (self.header.files() == v.header.files()))
955 (self.header.files() == v.header.files()))
955
956
956 def __hash__(self):
957 def __hash__(self):
957 return hash((tuple(self.hunk),
958 return hash((tuple(self.hunk),
958 tuple(self.header.files()),
959 tuple(self.header.files()),
959 self.fromline,
960 self.fromline,
960 self.proc))
961 self.proc))
961
962
962 def countchanges(self, hunk):
963 def countchanges(self, hunk):
963 """hunk -> (n+,n-)"""
964 """hunk -> (n+,n-)"""
964 add = len([h for h in hunk if h.startswith('+')])
965 add = len([h for h in hunk if h.startswith('+')])
965 rem = len([h for h in hunk if h.startswith('-')])
966 rem = len([h for h in hunk if h.startswith('-')])
966 return add, rem
967 return add, rem
967
968
968 def reversehunk(self):
969 def reversehunk(self):
969 """return another recordhunk which is the reverse of the hunk
970 """return another recordhunk which is the reverse of the hunk
970
971
971 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
972 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
972 that, swap fromline/toline and +/- signs while keeping other things
973 that, swap fromline/toline and +/- signs while keeping other things
973 unchanged.
974 unchanged.
974 """
975 """
975 m = {'+': '-', '-': '+', '\\': '\\'}
976 m = {'+': '-', '-': '+', '\\': '\\'}
976 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
977 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
977 return recordhunk(self.header, self.toline, self.fromline, self.proc,
978 return recordhunk(self.header, self.toline, self.fromline, self.proc,
978 self.before, hunk, self.after)
979 self.before, hunk, self.after)
979
980
980 def write(self, fp):
981 def write(self, fp):
981 delta = len(self.before) + len(self.after)
982 delta = len(self.before) + len(self.after)
982 if self.after and self.after[-1] == '\\ No newline at end of file\n':
983 if self.after and self.after[-1] == '\\ No newline at end of file\n':
983 delta -= 1
984 delta -= 1
984 fromlen = delta + self.removed
985 fromlen = delta + self.removed
985 tolen = delta + self.added
986 tolen = delta + self.added
986 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
987 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
987 (self.fromline, fromlen, self.toline, tolen,
988 (self.fromline, fromlen, self.toline, tolen,
988 self.proc and (' ' + self.proc)))
989 self.proc and (' ' + self.proc)))
989 fp.write(''.join(self.before + self.hunk + self.after))
990 fp.write(''.join(self.before + self.hunk + self.after))
990
991
991 pretty = write
992 pretty = write
992
993
993 def filename(self):
994 def filename(self):
994 return self.header.filename()
995 return self.header.filename()
995
996
996 def __repr__(self):
997 def __repr__(self):
997 return '<hunk %r@%d>' % (self.filename(), self.fromline)
998 return '<hunk %r@%d>' % (self.filename(), self.fromline)
998
999
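# Illustrative sketch (not part of the original file): counting and reversing
# a small hunk; the header instance is only needed for bookkeeping here.
#
#     >>> h = header(['diff --git a/f.txt b/f.txt\n'])
#     >>> rh = recordhunk(h, 3, 3, '', [' ctx\n'], ['-old\n', '+new\n'], [' ctx\n'])
#     >>> (rh.added, rh.removed)
#     (1, 1)
#     >>> rh.reversehunk().hunk
#     ['+old\n', '-new\n']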
999 def getmessages():
1000 def getmessages():
1000 return {
1001 return {
1001 'multiple': {
1002 'multiple': {
1002 'apply': _("apply change %d/%d to '%s'?"),
1003 'apply': _("apply change %d/%d to '%s'?"),
1003 'discard': _("discard change %d/%d to '%s'?"),
1004 'discard': _("discard change %d/%d to '%s'?"),
1004 'record': _("record change %d/%d to '%s'?"),
1005 'record': _("record change %d/%d to '%s'?"),
1005 },
1006 },
1006 'single': {
1007 'single': {
1007 'apply': _("apply this change to '%s'?"),
1008 'apply': _("apply this change to '%s'?"),
1008 'discard': _("discard this change to '%s'?"),
1009 'discard': _("discard this change to '%s'?"),
1009 'record': _("record this change to '%s'?"),
1010 'record': _("record this change to '%s'?"),
1010 },
1011 },
1011 'help': {
1012 'help': {
1012 'apply': _('[Ynesfdaq?]'
1013 'apply': _('[Ynesfdaq?]'
1013 '$$ &Yes, apply this change'
1014 '$$ &Yes, apply this change'
1014 '$$ &No, skip this change'
1015 '$$ &No, skip this change'
1015 '$$ &Edit this change manually'
1016 '$$ &Edit this change manually'
1016 '$$ &Skip remaining changes to this file'
1017 '$$ &Skip remaining changes to this file'
1017 '$$ Apply remaining changes to this &file'
1018 '$$ Apply remaining changes to this &file'
1018 '$$ &Done, skip remaining changes and files'
1019 '$$ &Done, skip remaining changes and files'
1019 '$$ Apply &all changes to all remaining files'
1020 '$$ Apply &all changes to all remaining files'
1020 '$$ &Quit, applying no changes'
1021 '$$ &Quit, applying no changes'
1021 '$$ &? (display help)'),
1022 '$$ &? (display help)'),
1022 'discard': _('[Ynesfdaq?]'
1023 'discard': _('[Ynesfdaq?]'
1023 '$$ &Yes, discard this change'
1024 '$$ &Yes, discard this change'
1024 '$$ &No, skip this change'
1025 '$$ &No, skip this change'
1025 '$$ &Edit this change manually'
1026 '$$ &Edit this change manually'
1026 '$$ &Skip remaining changes to this file'
1027 '$$ &Skip remaining changes to this file'
1027 '$$ Discard remaining changes to this &file'
1028 '$$ Discard remaining changes to this &file'
1028 '$$ &Done, skip remaining changes and files'
1029 '$$ &Done, skip remaining changes and files'
1029 '$$ Discard &all changes to all remaining files'
1030 '$$ Discard &all changes to all remaining files'
1030 '$$ &Quit, discarding no changes'
1031 '$$ &Quit, discarding no changes'
1031 '$$ &? (display help)'),
1032 '$$ &? (display help)'),
1032 'record': _('[Ynesfdaq?]'
1033 'record': _('[Ynesfdaq?]'
1033 '$$ &Yes, record this change'
1034 '$$ &Yes, record this change'
1034 '$$ &No, skip this change'
1035 '$$ &No, skip this change'
1035 '$$ &Edit this change manually'
1036 '$$ &Edit this change manually'
1036 '$$ &Skip remaining changes to this file'
1037 '$$ &Skip remaining changes to this file'
1037 '$$ Record remaining changes to this &file'
1038 '$$ Record remaining changes to this &file'
1038 '$$ &Done, skip remaining changes and files'
1039 '$$ &Done, skip remaining changes and files'
1039 '$$ Record &all changes to all remaining files'
1040 '$$ Record &all changes to all remaining files'
1040 '$$ &Quit, recording no changes'
1041 '$$ &Quit, recording no changes'
1041 '$$ &? (display help)'),
1042 '$$ &? (display help)'),
1042 }
1043 }
1043 }
1044 }
1044
1045
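# Illustrative sketch (not part of the original file): the prompt strings are
# plain format strings keyed by mode and operation.
#
#     >>> getmessages()['single']['record'] % 'foo.py'
#     "record this change to 'foo.py'?"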
1045 def filterpatch(ui, headers, operation=None):
1046 def filterpatch(ui, headers, operation=None):
1046 """Interactively filter patch chunks into applied-only chunks"""
1047 """Interactively filter patch chunks into applied-only chunks"""
1047 messages = getmessages()
1048 messages = getmessages()
1048
1049
1049 if operation is None:
1050 if operation is None:
1050 operation = 'record'
1051 operation = 'record'
1051
1052
1052 def prompt(skipfile, skipall, query, chunk):
1053 def prompt(skipfile, skipall, query, chunk):
1053 """prompt query, and process base inputs
1054 """prompt query, and process base inputs
1054
1055
1055 - y/n for the rest of file
1056 - y/n for the rest of file
1056 - y/n for the rest
1057 - y/n for the rest
1057 - ? (help)
1058 - ? (help)
1058 - q (quit)
1059 - q (quit)
1059
1060
1060 Return True/False and possibly updated skipfile and skipall.
1061 Return True/False and possibly updated skipfile and skipall.
1061 """
1062 """
1062 newpatches = None
1063 newpatches = None
1063 if skipall is not None:
1064 if skipall is not None:
1064 return skipall, skipfile, skipall, newpatches
1065 return skipall, skipfile, skipall, newpatches
1065 if skipfile is not None:
1066 if skipfile is not None:
1066 return skipfile, skipfile, skipall, newpatches
1067 return skipfile, skipfile, skipall, newpatches
1067 while True:
1068 while True:
1068 resps = messages['help'][operation]
1069 resps = messages['help'][operation]
1069 r = ui.promptchoice("%s %s" % (query, resps))
1070 r = ui.promptchoice("%s %s" % (query, resps))
1070 ui.write("\n")
1071 ui.write("\n")
1071 if r == 8: # ?
1072 if r == 8: # ?
1072 for c, t in ui.extractchoices(resps)[1]:
1073 for c, t in ui.extractchoices(resps)[1]:
1073 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1074 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1074 continue
1075 continue
1075 elif r == 0: # yes
1076 elif r == 0: # yes
1076 ret = True
1077 ret = True
1077 elif r == 1: # no
1078 elif r == 1: # no
1078 ret = False
1079 ret = False
1079 elif r == 2: # Edit patch
1080 elif r == 2: # Edit patch
1080 if chunk is None:
1081 if chunk is None:
1081 ui.write(_('cannot edit patch for whole file'))
1082 ui.write(_('cannot edit patch for whole file'))
1082 ui.write("\n")
1083 ui.write("\n")
1083 continue
1084 continue
1084 if chunk.header.binary():
1085 if chunk.header.binary():
1085 ui.write(_('cannot edit patch for binary file'))
1086 ui.write(_('cannot edit patch for binary file'))
1086 ui.write("\n")
1087 ui.write("\n")
1087 continue
1088 continue
1088 # Patch comment based on the Git one (based on comment at end of
1089 # Patch comment based on the Git one (based on comment at end of
1089 # https://mercurial-scm.org/wiki/RecordExtension)
1090 # https://mercurial-scm.org/wiki/RecordExtension)
1090 phelp = '---' + _("""
1091 phelp = '---' + _("""
1091 To remove '-' lines, make them ' ' lines (context).
1092 To remove '-' lines, make them ' ' lines (context).
1092 To remove '+' lines, delete them.
1093 To remove '+' lines, delete them.
1093 Lines starting with # will be removed from the patch.
1094 Lines starting with # will be removed from the patch.
1094
1095
1095 If the patch applies cleanly, the edited hunk will immediately be
1096 If the patch applies cleanly, the edited hunk will immediately be
1096 added to the record list. If it does not apply cleanly, a rejects
1097 added to the record list. If it does not apply cleanly, a rejects
1097 file will be generated: you can use that when you try again. If
1098 file will be generated: you can use that when you try again. If
1098 all lines of the hunk are removed, then the edit is aborted and
1099 all lines of the hunk are removed, then the edit is aborted and
1099 the hunk is left unchanged.
1100 the hunk is left unchanged.
1100 """)
1101 """)
1101 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1102 (patchfd, patchfn) = tempfile.mkstemp(prefix="hg-editor-",
1102 suffix=".diff", text=True)
1103 suffix=".diff", text=True)
1103 ncpatchfp = None
1104 ncpatchfp = None
1104 try:
1105 try:
1105 # Write the initial patch
1106 # Write the initial patch
1106 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1107 f = os.fdopen(patchfd, pycompat.sysstr("w"))
1107 chunk.header.write(f)
1108 chunk.header.write(f)
1108 chunk.write(f)
1109 chunk.write(f)
1109 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1110 f.write('\n'.join(['# ' + i for i in phelp.splitlines()]))
1110 f.close()
1111 f.close()
1111 # Start the editor and wait for it to complete
1112 # Start the editor and wait for it to complete
1112 editor = ui.geteditor()
1113 editor = ui.geteditor()
1113 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1114 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1114 environ={'HGUSER': ui.username()},
1115 environ={'HGUSER': ui.username()},
1115 blockedtag='filterpatch')
1116 blockedtag='filterpatch')
1116 if ret != 0:
1117 if ret != 0:
1117 ui.warn(_("editor exited with exit code %d\n") % ret)
1118 ui.warn(_("editor exited with exit code %d\n") % ret)
1118 continue
1119 continue
1119 # Remove comment lines
1120 # Remove comment lines
1120 patchfp = open(patchfn)
1121 patchfp = open(patchfn)
1121 ncpatchfp = stringio()
1122 ncpatchfp = stringio()
1122 for line in util.iterfile(patchfp):
1123 for line in util.iterfile(patchfp):
1123 if not line.startswith('#'):
1124 if not line.startswith('#'):
1124 ncpatchfp.write(line)
1125 ncpatchfp.write(line)
1125 patchfp.close()
1126 patchfp.close()
1126 ncpatchfp.seek(0)
1127 ncpatchfp.seek(0)
1127 newpatches = parsepatch(ncpatchfp)
1128 newpatches = parsepatch(ncpatchfp)
1128 finally:
1129 finally:
1129 os.unlink(patchfn)
1130 os.unlink(patchfn)
1130 del ncpatchfp
1131 del ncpatchfp
1131 # Signal that the chunk shouldn't be applied as-is, but
1132 # Signal that the chunk shouldn't be applied as-is, but
1132 # provide the new patch to be used instead.
1133 # provide the new patch to be used instead.
1133 ret = False
1134 ret = False
1134 elif r == 3: # Skip
1135 elif r == 3: # Skip
1135 ret = skipfile = False
1136 ret = skipfile = False
1136 elif r == 4: # file (Record remaining)
1137 elif r == 4: # file (Record remaining)
1137 ret = skipfile = True
1138 ret = skipfile = True
1138 elif r == 5: # done, skip remaining
1139 elif r == 5: # done, skip remaining
1139 ret = skipall = False
1140 ret = skipall = False
1140 elif r == 6: # all
1141 elif r == 6: # all
1141 ret = skipall = True
1142 ret = skipall = True
1142 elif r == 7: # quit
1143 elif r == 7: # quit
1143 raise error.Abort(_('user quit'))
1144 raise error.Abort(_('user quit'))
1144 return ret, skipfile, skipall, newpatches
1145 return ret, skipfile, skipall, newpatches
1145
1146
1146 seen = set()
1147 seen = set()
1147 applied = {} # 'filename' -> [] of chunks
1148 applied = {} # 'filename' -> [] of chunks
1148 skipfile, skipall = None, None
1149 skipfile, skipall = None, None
1149 pos, total = 1, sum(len(h.hunks) for h in headers)
1150 pos, total = 1, sum(len(h.hunks) for h in headers)
1150 for h in headers:
1151 for h in headers:
1151 pos += len(h.hunks)
1152 pos += len(h.hunks)
1152 skipfile = None
1153 skipfile = None
1153 fixoffset = 0
1154 fixoffset = 0
1154 hdr = ''.join(h.header)
1155 hdr = ''.join(h.header)
1155 if hdr in seen:
1156 if hdr in seen:
1156 continue
1157 continue
1157 seen.add(hdr)
1158 seen.add(hdr)
1158 if skipall is None:
1159 if skipall is None:
1159 h.pretty(ui)
1160 h.pretty(ui)
1160 msg = (_('examine changes to %s?') %
1161 msg = (_('examine changes to %s?') %
1161 _(' and ').join("'%s'" % f for f in h.files()))
1162 _(' and ').join("'%s'" % f for f in h.files()))
1162 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1163 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1163 if not r:
1164 if not r:
1164 continue
1165 continue
1165 applied[h.filename()] = [h]
1166 applied[h.filename()] = [h]
1166 if h.allhunks():
1167 if h.allhunks():
1167 applied[h.filename()] += h.hunks
1168 applied[h.filename()] += h.hunks
1168 continue
1169 continue
1169 for i, chunk in enumerate(h.hunks):
1170 for i, chunk in enumerate(h.hunks):
1170 if skipfile is None and skipall is None:
1171 if skipfile is None and skipall is None:
1171 chunk.pretty(ui)
1172 chunk.pretty(ui)
1172 if total == 1:
1173 if total == 1:
1173 msg = messages['single'][operation] % chunk.filename()
1174 msg = messages['single'][operation] % chunk.filename()
1174 else:
1175 else:
1175 idx = pos - len(h.hunks) + i
1176 idx = pos - len(h.hunks) + i
1176 msg = messages['multiple'][operation] % (idx, total,
1177 msg = messages['multiple'][operation] % (idx, total,
1177 chunk.filename())
1178 chunk.filename())
1178 r, skipfile, skipall, newpatches = prompt(skipfile,
1179 r, skipfile, skipall, newpatches = prompt(skipfile,
1179 skipall, msg, chunk)
1180 skipall, msg, chunk)
1180 if r:
1181 if r:
1181 if fixoffset:
1182 if fixoffset:
1182 chunk = copy.copy(chunk)
1183 chunk = copy.copy(chunk)
1183 chunk.toline += fixoffset
1184 chunk.toline += fixoffset
1184 applied[chunk.filename()].append(chunk)
1185 applied[chunk.filename()].append(chunk)
1185 elif newpatches is not None:
1186 elif newpatches is not None:
1186 for newpatch in newpatches:
1187 for newpatch in newpatches:
1187 for newhunk in newpatch.hunks:
1188 for newhunk in newpatch.hunks:
1188 if fixoffset:
1189 if fixoffset:
1189 newhunk.toline += fixoffset
1190 newhunk.toline += fixoffset
1190 applied[newhunk.filename()].append(newhunk)
1191 applied[newhunk.filename()].append(newhunk)
1191 else:
1192 else:
1192 fixoffset += chunk.removed - chunk.added
1193 fixoffset += chunk.removed - chunk.added
1193 return (sum([h for h in applied.itervalues()
1194 return (sum([h for h in applied.itervalues()
1194 if h[0].special() or len(h) > 1], []), {})
1195 if h[0].special() or len(h) > 1], []), {})
1195 class hunk(object):
1196 class hunk(object):
1196 def __init__(self, desc, num, lr, context):
1197 def __init__(self, desc, num, lr, context):
1197 self.number = num
1198 self.number = num
1198 self.desc = desc
1199 self.desc = desc
1199 self.hunk = [desc]
1200 self.hunk = [desc]
1200 self.a = []
1201 self.a = []
1201 self.b = []
1202 self.b = []
1202 self.starta = self.lena = None
1203 self.starta = self.lena = None
1203 self.startb = self.lenb = None
1204 self.startb = self.lenb = None
1204 if lr is not None:
1205 if lr is not None:
1205 if context:
1206 if context:
1206 self.read_context_hunk(lr)
1207 self.read_context_hunk(lr)
1207 else:
1208 else:
1208 self.read_unified_hunk(lr)
1209 self.read_unified_hunk(lr)
1209
1210
1210 def getnormalized(self):
1211 def getnormalized(self):
1211 """Return a copy with line endings normalized to LF."""
1212 """Return a copy with line endings normalized to LF."""
1212
1213
1213 def normalize(lines):
1214 def normalize(lines):
1214 nlines = []
1215 nlines = []
1215 for line in lines:
1216 for line in lines:
1216 if line.endswith('\r\n'):
1217 if line.endswith('\r\n'):
1217 line = line[:-2] + '\n'
1218 line = line[:-2] + '\n'
1218 nlines.append(line)
1219 nlines.append(line)
1219 return nlines
1220 return nlines
1220
1221
1221 # Dummy object, it is rebuilt manually
1222 # Dummy object, it is rebuilt manually
1222 nh = hunk(self.desc, self.number, None, None)
1223 nh = hunk(self.desc, self.number, None, None)
1223 nh.number = self.number
1224 nh.number = self.number
1224 nh.desc = self.desc
1225 nh.desc = self.desc
1225 nh.hunk = self.hunk
1226 nh.hunk = self.hunk
1226 nh.a = normalize(self.a)
1227 nh.a = normalize(self.a)
1227 nh.b = normalize(self.b)
1228 nh.b = normalize(self.b)
1228 nh.starta = self.starta
1229 nh.starta = self.starta
1229 nh.startb = self.startb
1230 nh.startb = self.startb
1230 nh.lena = self.lena
1231 nh.lena = self.lena
1231 nh.lenb = self.lenb
1232 nh.lenb = self.lenb
1232 return nh
1233 return nh
1233
1234
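    # For illustration: normalize(['a\r\n', 'b\n']) above yields
    # ['a\n', 'b\n']; only CRLF terminators are rewritten, the payload and
    # lines that already end in a lone '\n' are left untouched.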
1234 def read_unified_hunk(self, lr):
1235 def read_unified_hunk(self, lr):
1235 m = unidesc.match(self.desc)
1236 m = unidesc.match(self.desc)
1236 if not m:
1237 if not m:
1237 raise PatchError(_("bad hunk #%d") % self.number)
1238 raise PatchError(_("bad hunk #%d") % self.number)
1238 self.starta, self.lena, self.startb, self.lenb = m.groups()
1239 self.starta, self.lena, self.startb, self.lenb = m.groups()
1239 if self.lena is None:
1240 if self.lena is None:
1240 self.lena = 1
1241 self.lena = 1
1241 else:
1242 else:
1242 self.lena = int(self.lena)
1243 self.lena = int(self.lena)
1243 if self.lenb is None:
1244 if self.lenb is None:
1244 self.lenb = 1
1245 self.lenb = 1
1245 else:
1246 else:
1246 self.lenb = int(self.lenb)
1247 self.lenb = int(self.lenb)
1247 self.starta = int(self.starta)
1248 self.starta = int(self.starta)
1248 self.startb = int(self.startb)
1249 self.startb = int(self.startb)
1249 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1250 diffhelpers.addlines(lr, self.hunk, self.lena, self.lenb, self.a,
1250 self.b)
1251 self.b)
1251 # if we hit EOF before finishing out the hunk, the last line will
1252 # if we hit EOF before finishing out the hunk, the last line will
1252 # be zero length. Let's try to fix it up.
1253 # be zero length. Let's try to fix it up.
1253 while len(self.hunk[-1]) == 0:
1254 while len(self.hunk[-1]) == 0:
1254 del self.hunk[-1]
1255 del self.hunk[-1]
1255 del self.a[-1]
1256 del self.a[-1]
1256 del self.b[-1]
1257 del self.b[-1]
1257 self.lena -= 1
1258 self.lena -= 1
1258 self.lenb -= 1
1259 self.lenb -= 1
1259 self._fixnewline(lr)
1260 self._fixnewline(lr)
1260
1261
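    # A unified hunk header has the form '@@ -start[,len] +start[,len] @@'.
    # Assuming unidesc matches that form, '@@ -1,7 +2,8 @@' parses to
    # starta=1, lena=7, startb=2, lenb=8, while '@@ -3 +3 @@' leaves the
    # lengths as None, which read_unified_hunk above defaults to 1.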
1261 def read_context_hunk(self, lr):
1262 def read_context_hunk(self, lr):
1262 self.desc = lr.readline()
1263 self.desc = lr.readline()
1263 m = contextdesc.match(self.desc)
1264 m = contextdesc.match(self.desc)
1264 if not m:
1265 if not m:
1265 raise PatchError(_("bad hunk #%d") % self.number)
1266 raise PatchError(_("bad hunk #%d") % self.number)
1266 self.starta, aend = m.groups()
1267 self.starta, aend = m.groups()
1267 self.starta = int(self.starta)
1268 self.starta = int(self.starta)
1268 if aend is None:
1269 if aend is None:
1269 aend = self.starta
1270 aend = self.starta
1270 self.lena = int(aend) - self.starta
1271 self.lena = int(aend) - self.starta
1271 if self.starta:
1272 if self.starta:
1272 self.lena += 1
1273 self.lena += 1
1273 for x in xrange(self.lena):
1274 for x in xrange(self.lena):
1274 l = lr.readline()
1275 l = lr.readline()
1275 if l.startswith('---'):
1276 if l.startswith('---'):
1276 # lines addition, old block is empty
1277 # lines addition, old block is empty
1277 lr.push(l)
1278 lr.push(l)
1278 break
1279 break
1279 s = l[2:]
1280 s = l[2:]
1280 if l.startswith('- ') or l.startswith('! '):
1281 if l.startswith('- ') or l.startswith('! '):
1281 u = '-' + s
1282 u = '-' + s
1282 elif l.startswith(' '):
1283 elif l.startswith(' '):
1283 u = ' ' + s
1284 u = ' ' + s
1284 else:
1285 else:
1285 raise PatchError(_("bad hunk #%d old text line %d") %
1286 raise PatchError(_("bad hunk #%d old text line %d") %
1286 (self.number, x))
1287 (self.number, x))
1287 self.a.append(u)
1288 self.a.append(u)
1288 self.hunk.append(u)
1289 self.hunk.append(u)
1289
1290
1290 l = lr.readline()
1291 l = lr.readline()
1291 if l.startswith('\ '):
1292 if l.startswith('\ '):
1292 s = self.a[-1][:-1]
1293 s = self.a[-1][:-1]
1293 self.a[-1] = s
1294 self.a[-1] = s
1294 self.hunk[-1] = s
1295 self.hunk[-1] = s
1295 l = lr.readline()
1296 l = lr.readline()
1296 m = contextdesc.match(l)
1297 m = contextdesc.match(l)
1297 if not m:
1298 if not m:
1298 raise PatchError(_("bad hunk #%d") % self.number)
1299 raise PatchError(_("bad hunk #%d") % self.number)
1299 self.startb, bend = m.groups()
1300 self.startb, bend = m.groups()
1300 self.startb = int(self.startb)
1301 self.startb = int(self.startb)
1301 if bend is None:
1302 if bend is None:
1302 bend = self.startb
1303 bend = self.startb
1303 self.lenb = int(bend) - self.startb
1304 self.lenb = int(bend) - self.startb
1304 if self.startb:
1305 if self.startb:
1305 self.lenb += 1
1306 self.lenb += 1
1306 hunki = 1
1307 hunki = 1
1307 for x in xrange(self.lenb):
1308 for x in xrange(self.lenb):
1308 l = lr.readline()
1309 l = lr.readline()
1309 if l.startswith('\ '):
1310 if l.startswith('\ '):
1310 # XXX: the only way to hit this is with an invalid line range.
1311 # XXX: the only way to hit this is with an invalid line range.
1311 # The no-eol marker is not counted in the line range, but I
1312 # The no-eol marker is not counted in the line range, but I
1312 # guess there are diff(1) implementations out there which behave differently.
1313 # guess there are diff(1) implementations out there which behave differently.
1313 s = self.b[-1][:-1]
1314 s = self.b[-1][:-1]
1314 self.b[-1] = s
1315 self.b[-1] = s
1315 self.hunk[hunki - 1] = s
1316 self.hunk[hunki - 1] = s
1316 continue
1317 continue
1317 if not l:
1318 if not l:
1318 # line deletions, new block is empty and we hit EOF
1319 # line deletions, new block is empty and we hit EOF
1319 lr.push(l)
1320 lr.push(l)
1320 break
1321 break
1321 s = l[2:]
1322 s = l[2:]
1322 if l.startswith('+ ') or l.startswith('! '):
1323 if l.startswith('+ ') or l.startswith('! '):
1323 u = '+' + s
1324 u = '+' + s
1324 elif l.startswith(' '):
1325 elif l.startswith(' '):
1325 u = ' ' + s
1326 u = ' ' + s
1326 elif len(self.b) == 0:
1327 elif len(self.b) == 0:
1327 # line deletions, new block is empty
1328 # line deletions, new block is empty
1328 lr.push(l)
1329 lr.push(l)
1329 break
1330 break
1330 else:
1331 else:
1331 raise PatchError(_("bad hunk #%d old text line %d") %
1332 raise PatchError(_("bad hunk #%d old text line %d") %
1332 (self.number, x))
1333 (self.number, x))
1333 self.b.append(s)
1334 self.b.append(s)
1334 while True:
1335 while True:
1335 if hunki >= len(self.hunk):
1336 if hunki >= len(self.hunk):
1336 h = ""
1337 h = ""
1337 else:
1338 else:
1338 h = self.hunk[hunki]
1339 h = self.hunk[hunki]
1339 hunki += 1
1340 hunki += 1
1340 if h == u:
1341 if h == u:
1341 break
1342 break
1342 elif h.startswith('-'):
1343 elif h.startswith('-'):
1343 continue
1344 continue
1344 else:
1345 else:
1345 self.hunk.insert(hunki - 1, u)
1346 self.hunk.insert(hunki - 1, u)
1346 break
1347 break
1347
1348
1348 if not self.a:
1349 if not self.a:
1349 # this happens when lines were only added to the hunk
1350 # this happens when lines were only added to the hunk
1350 for x in self.hunk:
1351 for x in self.hunk:
1351 if x.startswith('-') or x.startswith(' '):
1352 if x.startswith('-') or x.startswith(' '):
1352 self.a.append(x)
1353 self.a.append(x)
1353 if not self.b:
1354 if not self.b:
1354 # this happens when lines were only deleted from the hunk
1355 # this happens when lines were only deleted from the hunk
1355 for x in self.hunk:
1356 for x in self.hunk:
1356 if x.startswith('+') or x.startswith(' '):
1357 if x.startswith('+') or x.startswith(' '):
1357 self.b.append(x[1:])
1358 self.b.append(x[1:])
1358 # @@ -start,len +start,len @@
1359 # @@ -start,len +start,len @@
1359 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1360 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1360 self.startb, self.lenb)
1361 self.startb, self.lenb)
1361 self.hunk[0] = self.desc
1362 self.hunk[0] = self.desc
1362 self._fixnewline(lr)
1363 self._fixnewline(lr)
1363
1364
1364 def _fixnewline(self, lr):
1365 def _fixnewline(self, lr):
1365 l = lr.readline()
1366 l = lr.readline()
1366 if l.startswith('\ '):
1367 if l.startswith('\ '):
1367 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1368 diffhelpers.fix_newline(self.hunk, self.a, self.b)
1368 else:
1369 else:
1369 lr.push(l)
1370 lr.push(l)
1370
1371
1371 def complete(self):
1372 def complete(self):
1372 return len(self.a) == self.lena and len(self.b) == self.lenb
1373 return len(self.a) == self.lena and len(self.b) == self.lenb
1373
1374
1374 def _fuzzit(self, old, new, fuzz, toponly):
1375 def _fuzzit(self, old, new, fuzz, toponly):
1375 # this removes context lines from the top and bottom of the 'old' and
1376 # this removes context lines from the top and bottom of the 'old' and
1376 # 'new' lists. It checks the hunk to make sure only context lines are
1377 # 'new' lists. It checks the hunk to make sure only context lines are
1377 # removed, then returns the shortened lists plus the count trimmed on top.
1378 # removed, then returns the shortened lists plus the count trimmed on top.
1378 fuzz = min(fuzz, len(old))
1379 fuzz = min(fuzz, len(old))
1379 if fuzz:
1380 if fuzz:
1380 top = 0
1381 top = 0
1381 bot = 0
1382 bot = 0
1382 hlen = len(self.hunk)
1383 hlen = len(self.hunk)
1383 for x in xrange(hlen - 1):
1384 for x in xrange(hlen - 1):
1384 # the hunk starts with the @@ line, so use x+1
1385 # the hunk starts with the @@ line, so use x+1
1385 if self.hunk[x + 1][0] == ' ':
1386 if self.hunk[x + 1][0] == ' ':
1386 top += 1
1387 top += 1
1387 else:
1388 else:
1388 break
1389 break
1389 if not toponly:
1390 if not toponly:
1390 for x in xrange(hlen - 1):
1391 for x in xrange(hlen - 1):
1391 if self.hunk[hlen - bot - 1][0] == ' ':
1392 if self.hunk[hlen - bot - 1][0] == ' ':
1392 bot += 1
1393 bot += 1
1393 else:
1394 else:
1394 break
1395 break
1395
1396
1396 bot = min(fuzz, bot)
1397 bot = min(fuzz, bot)
1397 top = min(fuzz, top)
1398 top = min(fuzz, top)
1398 return old[top:len(old) - bot], new[top:len(new) - bot], top
1399 return old[top:len(old) - bot], new[top:len(new) - bot], top
1399 return old, new, 0
1400 return old, new, 0
1400
1401
1401 def fuzzit(self, fuzz, toponly):
1402 def fuzzit(self, fuzz, toponly):
1402 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1403 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1403 oldstart = self.starta + top
1404 oldstart = self.starta + top
1404 newstart = self.startb + top
1405 newstart = self.startb + top
1405 # zero length hunk ranges already have their start decremented
1406 # zero length hunk ranges already have their start decremented
1406 if self.lena and oldstart > 0:
1407 if self.lena and oldstart > 0:
1407 oldstart -= 1
1408 oldstart -= 1
1408 if self.lenb and newstart > 0:
1409 if self.lenb and newstart > 0:
1409 newstart -= 1
1410 newstart -= 1
1410 return old, oldstart, new, newstart
1411 return old, oldstart, new, newstart
1411
1412
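# A simplified, standalone sketch of the trimming rule behind _fuzzit and
# fuzzit above (the helper name is illustrative only): with a fuzz factor f,
# at most f leading and f trailing context lines (those starting with a
# space) may be dropped from a hunk body; '+' and '-' lines are never
# touched.
def _fuzztrimsketch(lines, fuzz):
    top = 0
    while top < min(fuzz, len(lines)) and lines[top].startswith(' '):
        top += 1
    bot = 0
    while (bot < min(fuzz, len(lines) - top)
           and lines[len(lines) - 1 - bot].startswith(' ')):
        bot += 1
    # e.g. _fuzztrimsketch([' c\n', '-old\n', '+new\n', ' c\n'], 1)
    # returns (['-old\n', '+new\n'], 1)
    return lines[top:len(lines) - bot], top
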
1412 class binhunk(object):
1413 class binhunk(object):
1413 'A binary patch file.'
1414 'A binary patch file.'
1414 def __init__(self, lr, fname):
1415 def __init__(self, lr, fname):
1415 self.text = None
1416 self.text = None
1416 self.delta = False
1417 self.delta = False
1417 self.hunk = ['GIT binary patch\n']
1418 self.hunk = ['GIT binary patch\n']
1418 self._fname = fname
1419 self._fname = fname
1419 self._read(lr)
1420 self._read(lr)
1420
1421
1421 def complete(self):
1422 def complete(self):
1422 return self.text is not None
1423 return self.text is not None
1423
1424
1424 def new(self, lines):
1425 def new(self, lines):
1425 if self.delta:
1426 if self.delta:
1426 return [applybindelta(self.text, ''.join(lines))]
1427 return [applybindelta(self.text, ''.join(lines))]
1427 return [self.text]
1428 return [self.text]
1428
1429
1429 def _read(self, lr):
1430 def _read(self, lr):
1430 def getline(lr, hunk):
1431 def getline(lr, hunk):
1431 l = lr.readline()
1432 l = lr.readline()
1432 hunk.append(l)
1433 hunk.append(l)
1433 return l.rstrip('\r\n')
1434 return l.rstrip('\r\n')
1434
1435
1435 size = 0
1436 size = 0
1436 while True:
1437 while True:
1437 line = getline(lr, self.hunk)
1438 line = getline(lr, self.hunk)
1438 if not line:
1439 if not line:
1439 raise PatchError(_('could not extract "%s" binary data')
1440 raise PatchError(_('could not extract "%s" binary data')
1440 % self._fname)
1441 % self._fname)
1441 if line.startswith('literal '):
1442 if line.startswith('literal '):
1442 size = int(line[8:].rstrip())
1443 size = int(line[8:].rstrip())
1443 break
1444 break
1444 if line.startswith('delta '):
1445 if line.startswith('delta '):
1445 size = int(line[6:].rstrip())
1446 size = int(line[6:].rstrip())
1446 self.delta = True
1447 self.delta = True
1447 break
1448 break
1448 dec = []
1449 dec = []
1449 line = getline(lr, self.hunk)
1450 line = getline(lr, self.hunk)
1450 while len(line) > 1:
1451 while len(line) > 1:
1451 l = line[0]
1452 l = line[0]
1452 if l <= 'Z' and l >= 'A':
1453 if l <= 'Z' and l >= 'A':
1453 l = ord(l) - ord('A') + 1
1454 l = ord(l) - ord('A') + 1
1454 else:
1455 else:
1455 l = ord(l) - ord('a') + 27
1456 l = ord(l) - ord('a') + 27
1456 try:
1457 try:
1457 dec.append(util.b85decode(line[1:])[:l])
1458 dec.append(util.b85decode(line[1:])[:l])
1458 except ValueError as e:
1459 except ValueError as e:
1459 raise PatchError(_('could not decode "%s" binary patch: %s')
1460 raise PatchError(_('could not decode "%s" binary patch: %s')
1460 % (self._fname, str(e)))
1461 % (self._fname, str(e)))
1461 line = getline(lr, self.hunk)
1462 line = getline(lr, self.hunk)
1462 text = zlib.decompress(''.join(dec))
1463 text = zlib.decompress(''.join(dec))
1463 if len(text) != size:
1464 if len(text) != size:
1464 raise PatchError(_('"%s" length is %d bytes, should be %d')
1465 raise PatchError(_('"%s" length is %d bytes, should be %d')
1465 % (self._fname, len(text), size))
1466 % (self._fname, len(text), size))
1466 self.text = text
1467 self.text = text
1467
1468
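# Sketch of the per-line decoding done in binhunk._read above (the helper
# name is illustrative, not part of this module's API): the first character
# of each base85 line encodes the decoded byte count, 'A'-'Z' for 1-26 and
# 'a'-'z' for 27-52, and the remainder is the base85 payload.
def _binlinedecode(line):
    c = line[0]
    if c >= 'A' and c <= 'Z':
        n = ord(c) - ord('A') + 1
    else:
        n = ord(c) - ord('a') + 27
    # decode, then drop the padding introduced by the 4-byte base85 groups
    return util.b85decode(line[1:])[:n]
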
1468 def parsefilename(str):
1469 def parsefilename(str):
1469 # header looks like '--- filename', then a tab or space, then trailing info
1470 # header looks like '--- filename', then a tab or space, then trailing info
1470 s = str[4:].rstrip('\r\n')
1471 s = str[4:].rstrip('\r\n')
1471 i = s.find('\t')
1472 i = s.find('\t')
1472 if i < 0:
1473 if i < 0:
1473 i = s.find(' ')
1474 i = s.find(' ')
1474 if i < 0:
1475 if i < 0:
1475 return s
1476 return s
1476 return s[:i]
1477 return s[:i]
1477
1478
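# For instance (illustrative helper, mirroring the parsing above): both
# '--- ' and '+++ ' prefixes are four characters long, and anything after
# the first tab or space in the remainder is treated as trailing info such
# as a timestamp.
def _parsefilenamedemo():
    assert parsefilename('--- a/foo.c\t2017-11-01 12:00:00 +0000\n') == 'a/foo.c'
    assert parsefilename('+++ b/foo.c\n') == 'b/foo.c'
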
1478 def reversehunks(hunks):
1479 def reversehunks(hunks):
1479 '''reverse the signs in the hunks given as argument
1480 '''reverse the signs in the hunks given as argument
1480
1481
1481 This function operates on hunks coming out of patch.filterpatch, that is
1482 This function operates on hunks coming out of patch.filterpatch, that is
1482 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1483 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1483
1484
1484 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1485 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1485 ... --- a/folder1/g
1486 ... --- a/folder1/g
1486 ... +++ b/folder1/g
1487 ... +++ b/folder1/g
1487 ... @@ -1,7 +1,7 @@
1488 ... @@ -1,7 +1,7 @@
1488 ... +firstline
1489 ... +firstline
1489 ... c
1490 ... c
1490 ... 1
1491 ... 1
1491 ... 2
1492 ... 2
1492 ... + 3
1493 ... + 3
1493 ... -4
1494 ... -4
1494 ... 5
1495 ... 5
1495 ... d
1496 ... d
1496 ... +lastline"""
1497 ... +lastline"""
1497 >>> hunks = parsepatch([rawpatch])
1498 >>> hunks = parsepatch([rawpatch])
1498 >>> hunkscomingfromfilterpatch = []
1499 >>> hunkscomingfromfilterpatch = []
1499 >>> for h in hunks:
1500 >>> for h in hunks:
1500 ... hunkscomingfromfilterpatch.append(h)
1501 ... hunkscomingfromfilterpatch.append(h)
1501 ... hunkscomingfromfilterpatch.extend(h.hunks)
1502 ... hunkscomingfromfilterpatch.extend(h.hunks)
1502
1503
1503 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1504 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1504 >>> from . import util
1505 >>> from . import util
1505 >>> fp = util.stringio()
1506 >>> fp = util.stringio()
1506 >>> for c in reversedhunks:
1507 >>> for c in reversedhunks:
1507 ... c.write(fp)
1508 ... c.write(fp)
1508 >>> fp.seek(0) or None
1509 >>> fp.seek(0) or None
1509 >>> reversedpatch = fp.read()
1510 >>> reversedpatch = fp.read()
1510 >>> print(pycompat.sysstr(reversedpatch))
1511 >>> print(pycompat.sysstr(reversedpatch))
1511 diff --git a/folder1/g b/folder1/g
1512 diff --git a/folder1/g b/folder1/g
1512 --- a/folder1/g
1513 --- a/folder1/g
1513 +++ b/folder1/g
1514 +++ b/folder1/g
1514 @@ -1,4 +1,3 @@
1515 @@ -1,4 +1,3 @@
1515 -firstline
1516 -firstline
1516 c
1517 c
1517 1
1518 1
1518 2
1519 2
1519 @@ -2,6 +1,6 @@
1520 @@ -2,6 +1,6 @@
1520 c
1521 c
1521 1
1522 1
1522 2
1523 2
1523 - 3
1524 - 3
1524 +4
1525 +4
1525 5
1526 5
1526 d
1527 d
1527 @@ -6,3 +5,2 @@
1528 @@ -6,3 +5,2 @@
1528 5
1529 5
1529 d
1530 d
1530 -lastline
1531 -lastline
1531
1532
1532 '''
1533 '''
1533
1534
1534 newhunks = []
1535 newhunks = []
1535 for c in hunks:
1536 for c in hunks:
1536 if util.safehasattr(c, 'reversehunk'):
1537 if util.safehasattr(c, 'reversehunk'):
1537 c = c.reversehunk()
1538 c = c.reversehunk()
1538 newhunks.append(c)
1539 newhunks.append(c)
1539 return newhunks
1540 return newhunks
1540
1541
1541 def parsepatch(originalchunks, maxcontext=None):
1542 def parsepatch(originalchunks, maxcontext=None):
1542 """patch -> [] of headers -> [] of hunks
1543 """patch -> [] of headers -> [] of hunks
1543
1544
1544 If maxcontext is not None, trim context lines if necessary.
1545 If maxcontext is not None, trim context lines if necessary.
1545
1546
1546 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1547 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1547 ... --- a/folder1/g
1548 ... --- a/folder1/g
1548 ... +++ b/folder1/g
1549 ... +++ b/folder1/g
1549 ... @@ -1,8 +1,10 @@
1550 ... @@ -1,8 +1,10 @@
1550 ... 1
1551 ... 1
1551 ... 2
1552 ... 2
1552 ... -3
1553 ... -3
1553 ... 4
1554 ... 4
1554 ... 5
1555 ... 5
1555 ... 6
1556 ... 6
1556 ... +6.1
1557 ... +6.1
1557 ... +6.2
1558 ... +6.2
1558 ... 7
1559 ... 7
1559 ... 8
1560 ... 8
1560 ... +9'''
1561 ... +9'''
1561 >>> out = util.stringio()
1562 >>> out = util.stringio()
1562 >>> headers = parsepatch([rawpatch], maxcontext=1)
1563 >>> headers = parsepatch([rawpatch], maxcontext=1)
1563 >>> for header in headers:
1564 >>> for header in headers:
1564 ... header.write(out)
1565 ... header.write(out)
1565 ... for hunk in header.hunks:
1566 ... for hunk in header.hunks:
1566 ... hunk.write(out)
1567 ... hunk.write(out)
1567 >>> print(pycompat.sysstr(out.getvalue()))
1568 >>> print(pycompat.sysstr(out.getvalue()))
1568 diff --git a/folder1/g b/folder1/g
1569 diff --git a/folder1/g b/folder1/g
1569 --- a/folder1/g
1570 --- a/folder1/g
1570 +++ b/folder1/g
1571 +++ b/folder1/g
1571 @@ -2,3 +2,2 @@
1572 @@ -2,3 +2,2 @@
1572 2
1573 2
1573 -3
1574 -3
1574 4
1575 4
1575 @@ -6,2 +5,4 @@
1576 @@ -6,2 +5,4 @@
1576 6
1577 6
1577 +6.1
1578 +6.1
1578 +6.2
1579 +6.2
1579 7
1580 7
1580 @@ -8,1 +9,2 @@
1581 @@ -8,1 +9,2 @@
1581 8
1582 8
1582 +9
1583 +9
1583 """
1584 """
1584 class parser(object):
1585 class parser(object):
1585 """patch parsing state machine"""
1586 """patch parsing state machine"""
1586 def __init__(self):
1587 def __init__(self):
1587 self.fromline = 0
1588 self.fromline = 0
1588 self.toline = 0
1589 self.toline = 0
1589 self.proc = ''
1590 self.proc = ''
1590 self.header = None
1591 self.header = None
1591 self.context = []
1592 self.context = []
1592 self.before = []
1593 self.before = []
1593 self.hunk = []
1594 self.hunk = []
1594 self.headers = []
1595 self.headers = []
1595
1596
1596 def addrange(self, limits):
1597 def addrange(self, limits):
1597 fromstart, fromend, tostart, toend, proc = limits
1598 fromstart, fromend, tostart, toend, proc = limits
1598 self.fromline = int(fromstart)
1599 self.fromline = int(fromstart)
1599 self.toline = int(tostart)
1600 self.toline = int(tostart)
1600 self.proc = proc
1601 self.proc = proc
1601
1602
1602 def addcontext(self, context):
1603 def addcontext(self, context):
1603 if self.hunk:
1604 if self.hunk:
1604 h = recordhunk(self.header, self.fromline, self.toline,
1605 h = recordhunk(self.header, self.fromline, self.toline,
1605 self.proc, self.before, self.hunk, context, maxcontext)
1606 self.proc, self.before, self.hunk, context, maxcontext)
1606 self.header.hunks.append(h)
1607 self.header.hunks.append(h)
1607 self.fromline += len(self.before) + h.removed
1608 self.fromline += len(self.before) + h.removed
1608 self.toline += len(self.before) + h.added
1609 self.toline += len(self.before) + h.added
1609 self.before = []
1610 self.before = []
1610 self.hunk = []
1611 self.hunk = []
1611 self.context = context
1612 self.context = context
1612
1613
1613 def addhunk(self, hunk):
1614 def addhunk(self, hunk):
1614 if self.context:
1615 if self.context:
1615 self.before = self.context
1616 self.before = self.context
1616 self.context = []
1617 self.context = []
1617 self.hunk = hunk
1618 self.hunk = hunk
1618
1619
1619 def newfile(self, hdr):
1620 def newfile(self, hdr):
1620 self.addcontext([])
1621 self.addcontext([])
1621 h = header(hdr)
1622 h = header(hdr)
1622 self.headers.append(h)
1623 self.headers.append(h)
1623 self.header = h
1624 self.header = h
1624
1625
1625 def addother(self, line):
1626 def addother(self, line):
1626 pass # 'other' lines are ignored
1627 pass # 'other' lines are ignored
1627
1628
1628 def finished(self):
1629 def finished(self):
1629 self.addcontext([])
1630 self.addcontext([])
1630 return self.headers
1631 return self.headers
1631
1632
1632 transitions = {
1633 transitions = {
1633 'file': {'context': addcontext,
1634 'file': {'context': addcontext,
1634 'file': newfile,
1635 'file': newfile,
1635 'hunk': addhunk,
1636 'hunk': addhunk,
1636 'range': addrange},
1637 'range': addrange},
1637 'context': {'file': newfile,
1638 'context': {'file': newfile,
1638 'hunk': addhunk,
1639 'hunk': addhunk,
1639 'range': addrange,
1640 'range': addrange,
1640 'other': addother},
1641 'other': addother},
1641 'hunk': {'context': addcontext,
1642 'hunk': {'context': addcontext,
1642 'file': newfile,
1643 'file': newfile,
1643 'range': addrange},
1644 'range': addrange},
1644 'range': {'context': addcontext,
1645 'range': {'context': addcontext,
1645 'hunk': addhunk},
1646 'hunk': addhunk},
1646 'other': {'other': addother},
1647 'other': {'other': addother},
1647 }
1648 }
1648
1649
1649 p = parser()
1650 p = parser()
1650 fp = stringio()
1651 fp = stringio()
1651 fp.write(''.join(originalchunks))
1652 fp.write(''.join(originalchunks))
1652 fp.seek(0)
1653 fp.seek(0)
1653
1654
1654 state = 'context'
1655 state = 'context'
1655 for newstate, data in scanpatch(fp):
1656 for newstate, data in scanpatch(fp):
1656 try:
1657 try:
1657 p.transitions[state][newstate](p, data)
1658 p.transitions[state][newstate](p, data)
1658 except KeyError:
1659 except KeyError:
1659 raise PatchError('unhandled transition: %s -> %s' %
1660 raise PatchError('unhandled transition: %s -> %s' %
1660 (state, newstate))
1661 (state, newstate))
1661 state = newstate
1662 state = newstate
1662 del fp
1663 del fp
1663 return p.finished()
1664 return p.finished()
1664
1665
1665 def pathtransform(path, strip, prefix):
1666 def pathtransform(path, strip, prefix):
1666 '''turn a path from a patch into a path suitable for the repository
1667 '''turn a path from a patch into a path suitable for the repository
1667
1668
1668 prefix, if not empty, is expected to be normalized with a / at the end.
1669 prefix, if not empty, is expected to be normalized with a / at the end.
1669
1670
1670 Returns (stripped components, path in repository).
1671 Returns (stripped components, path in repository).
1671
1672
1672 >>> pathtransform(b'a/b/c', 0, b'')
1673 >>> pathtransform(b'a/b/c', 0, b'')
1673 ('', 'a/b/c')
1674 ('', 'a/b/c')
1674 >>> pathtransform(b' a/b/c ', 0, b'')
1675 >>> pathtransform(b' a/b/c ', 0, b'')
1675 ('', ' a/b/c')
1676 ('', ' a/b/c')
1676 >>> pathtransform(b' a/b/c ', 2, b'')
1677 >>> pathtransform(b' a/b/c ', 2, b'')
1677 ('a/b/', 'c')
1678 ('a/b/', 'c')
1678 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1679 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1679 ('', 'd/e/a/b/c')
1680 ('', 'd/e/a/b/c')
1680 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1681 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1681 ('a//b/', 'd/e/c')
1682 ('a//b/', 'd/e/c')
1682 >>> pathtransform(b'a/b/c', 3, b'')
1683 >>> pathtransform(b'a/b/c', 3, b'')
1683 Traceback (most recent call last):
1684 Traceback (most recent call last):
1684 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1685 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1685 '''
1686 '''
1686 pathlen = len(path)
1687 pathlen = len(path)
1687 i = 0
1688 i = 0
1688 if strip == 0:
1689 if strip == 0:
1689 return '', prefix + path.rstrip()
1690 return '', prefix + path.rstrip()
1690 count = strip
1691 count = strip
1691 while count > 0:
1692 while count > 0:
1692 i = path.find('/', i)
1693 i = path.find('/', i)
1693 if i == -1:
1694 if i == -1:
1694 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1695 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1695 (count, strip, path))
1696 (count, strip, path))
1696 i += 1
1697 i += 1
1697 # consume '//' in the path
1698 # consume '//' in the path
1698 while i < pathlen - 1 and path[i:i + 1] == '/':
1699 while i < pathlen - 1 and path[i:i + 1] == '/':
1699 i += 1
1700 i += 1
1700 count -= 1
1701 count -= 1
1701 return path[:i].lstrip(), prefix + path[i:].rstrip()
1702 return path[:i].lstrip(), prefix + path[i:].rstrip()
1702
1703
1703 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1704 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1704 nulla = afile_orig == "/dev/null"
1705 nulla = afile_orig == "/dev/null"
1705 nullb = bfile_orig == "/dev/null"
1706 nullb = bfile_orig == "/dev/null"
1706 create = nulla and hunk.starta == 0 and hunk.lena == 0
1707 create = nulla and hunk.starta == 0 and hunk.lena == 0
1707 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1708 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1708 abase, afile = pathtransform(afile_orig, strip, prefix)
1709 abase, afile = pathtransform(afile_orig, strip, prefix)
1709 gooda = not nulla and backend.exists(afile)
1710 gooda = not nulla and backend.exists(afile)
1710 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1711 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1711 if afile == bfile:
1712 if afile == bfile:
1712 goodb = gooda
1713 goodb = gooda
1713 else:
1714 else:
1714 goodb = not nullb and backend.exists(bfile)
1715 goodb = not nullb and backend.exists(bfile)
1715 missing = not goodb and not gooda and not create
1716 missing = not goodb and not gooda and not create
1716
1717
1717 # some diff programs apparently produce patches where the afile is
1718 # some diff programs apparently produce patches where the afile is
1718 # not /dev/null, but afile starts with bfile
1719 # not /dev/null, but afile starts with bfile
1719 abasedir = afile[:afile.rfind('/') + 1]
1720 abasedir = afile[:afile.rfind('/') + 1]
1720 bbasedir = bfile[:bfile.rfind('/') + 1]
1721 bbasedir = bfile[:bfile.rfind('/') + 1]
1721 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1722 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1722 and hunk.starta == 0 and hunk.lena == 0):
1723 and hunk.starta == 0 and hunk.lena == 0):
1723 create = True
1724 create = True
1724 missing = False
1725 missing = False
1725
1726
1726 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1727 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1727 # diff is between a file and its backup. In this case, the original
1728 # diff is between a file and its backup. In this case, the original
1728 # file should be patched (see original mpatch code).
1729 # file should be patched (see original mpatch code).
1729 isbackup = (abase == bbase and bfile.startswith(afile))
1730 isbackup = (abase == bbase and bfile.startswith(afile))
1730 fname = None
1731 fname = None
1731 if not missing:
1732 if not missing:
1732 if gooda and goodb:
1733 if gooda and goodb:
1733 if isbackup:
1734 if isbackup:
1734 fname = afile
1735 fname = afile
1735 else:
1736 else:
1736 fname = bfile
1737 fname = bfile
1737 elif gooda:
1738 elif gooda:
1738 fname = afile
1739 fname = afile
1739
1740
1740 if not fname:
1741 if not fname:
1741 if not nullb:
1742 if not nullb:
1742 if isbackup:
1743 if isbackup:
1743 fname = afile
1744 fname = afile
1744 else:
1745 else:
1745 fname = bfile
1746 fname = bfile
1746 elif not nulla:
1747 elif not nulla:
1747 fname = afile
1748 fname = afile
1748 else:
1749 else:
1749 raise PatchError(_("undefined source and destination files"))
1750 raise PatchError(_("undefined source and destination files"))
1750
1751
1751 gp = patchmeta(fname)
1752 gp = patchmeta(fname)
1752 if create:
1753 if create:
1753 gp.op = 'ADD'
1754 gp.op = 'ADD'
1754 elif remove:
1755 elif remove:
1755 gp.op = 'DELETE'
1756 gp.op = 'DELETE'
1756 return gp
1757 return gp
1757
1758
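# Restating the create/delete tests from makepatchmeta above as a small
# illustrative helper (not used elsewhere): a creation has /dev/null as the
# old side and an empty old range in its first hunk, e.g. '@@ -0,0 +1,3 @@';
# deletion is the symmetric case on the new side.
def _iscreation(afile_orig, firsthunk):
    return (afile_orig == "/dev/null"
            and firsthunk.starta == 0 and firsthunk.lena == 0)
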
1758 def scanpatch(fp):
1759 def scanpatch(fp):
1759 """like patch.iterhunks, but yield different events
1760 """like patch.iterhunks, but yield different events
1760
1761
1761 - ('file', [header_lines + fromfile + tofile])
1762 - ('file', [header_lines + fromfile + tofile])
1762 - ('context', [context_lines])
1763 - ('context', [context_lines])
1763 - ('hunk', [hunk_lines])
1764 - ('hunk', [hunk_lines])
1764 - ('range', (-start,len, +start,len, proc))
1765 - ('range', (-start,len, +start,len, proc))
1765 """
1766 """
1766 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1767 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1767 lr = linereader(fp)
1768 lr = linereader(fp)
1768
1769
1769 def scanwhile(first, p):
1770 def scanwhile(first, p):
1770 """scan lr while predicate holds"""
1771 """scan lr while predicate holds"""
1771 lines = [first]
1772 lines = [first]
1772 for line in iter(lr.readline, ''):
1773 for line in iter(lr.readline, ''):
1773 if p(line):
1774 if p(line):
1774 lines.append(line)
1775 lines.append(line)
1775 else:
1776 else:
1776 lr.push(line)
1777 lr.push(line)
1777 break
1778 break
1778 return lines
1779 return lines
1779
1780
1780 for line in iter(lr.readline, ''):
1781 for line in iter(lr.readline, ''):
1781 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1782 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1782 def notheader(line):
1783 def notheader(line):
1783 s = line.split(None, 1)
1784 s = line.split(None, 1)
1784 return not s or s[0] not in ('---', 'diff')
1785 return not s or s[0] not in ('---', 'diff')
1785 header = scanwhile(line, notheader)
1786 header = scanwhile(line, notheader)
1786 fromfile = lr.readline()
1787 fromfile = lr.readline()
1787 if fromfile.startswith('---'):
1788 if fromfile.startswith('---'):
1788 tofile = lr.readline()
1789 tofile = lr.readline()
1789 header += [fromfile, tofile]
1790 header += [fromfile, tofile]
1790 else:
1791 else:
1791 lr.push(fromfile)
1792 lr.push(fromfile)
1792 yield 'file', header
1793 yield 'file', header
1793 elif line[0:1] == ' ':
1794 elif line[0:1] == ' ':
1794 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1795 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
1795 elif line[0] in '-+':
1796 elif line[0] in '-+':
1796 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1797 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
1797 else:
1798 else:
1798 m = lines_re.match(line)
1799 m = lines_re.match(line)
1799 if m:
1800 if m:
1800 yield 'range', m.groups()
1801 yield 'range', m.groups()
1801 else:
1802 else:
1802 yield 'other', line
1803 yield 'other', line
1803
1804
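# Illustrative example of the event stream (the helper name is made up):
# a minimal one-hunk git-style patch produces a 'file' event carrying the
# header lines, a 'range' event with the parsed @@ numbers, then 'context'
# and 'hunk' events for the body.
def _scanpatchdemo():
    text = ('diff --git a/f b/f\n'
            '--- a/f\n'
            '+++ b/f\n'
            '@@ -1,2 +1,2 @@\n'
            ' context\n'
            '-old\n'
            '+new\n')
    # expected result: ['file', 'range', 'context', 'hunk']
    return [event for event, data in scanpatch(stringio(text))]
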
1804 def scangitpatch(lr, firstline):
1805 def scangitpatch(lr, firstline):
1805 """
1806 """
1806 Git patches can emit:
1807 Git patches can emit:
1807 - rename a to b
1808 - rename a to b
1808 - change b
1809 - change b
1809 - copy a to c
1810 - copy a to c
1810 - change c
1811 - change c
1811
1812
1812 We cannot apply this sequence as-is: the renamed 'a' could not be
1813 We cannot apply this sequence as-is: the renamed 'a' could not be
1813 found, as it would have been renamed already. And we cannot copy
1814 found, as it would have been renamed already. And we cannot copy
1814 from 'b' instead because 'b' would have been changed already. So
1815 from 'b' instead because 'b' would have been changed already. So
1815 we scan the git patch for copy and rename commands so we can
1816 we scan the git patch for copy and rename commands so we can
1816 perform the copies ahead of time.
1817 perform the copies ahead of time.
1817 """
1818 """
1818 pos = 0
1819 pos = 0
1819 try:
1820 try:
1820 pos = lr.fp.tell()
1821 pos = lr.fp.tell()
1821 fp = lr.fp
1822 fp = lr.fp
1822 except IOError:
1823 except IOError:
1823 fp = stringio(lr.fp.read())
1824 fp = stringio(lr.fp.read())
1824 gitlr = linereader(fp)
1825 gitlr = linereader(fp)
1825 gitlr.push(firstline)
1826 gitlr.push(firstline)
1826 gitpatches = readgitpatch(gitlr)
1827 gitpatches = readgitpatch(gitlr)
1827 fp.seek(pos)
1828 fp.seek(pos)
1828 return gitpatches
1829 return gitpatches
1829
1830
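# Concretely, the problematic ordering looks like this in a git-style patch
# (schematic, using git's extended header lines):
#
#   diff --git a/a b/b
#   rename from a
#   rename to b
#   ... hunks modifying b ...
#   diff --git a/a b/c
#   copy from a
#   copy to c
#   ... hunks modifying c ...
#
# By the time the second block is reached, 'a' has been renamed away and 'b'
# already contains new content, so scangitpatch collects the copy/rename
# metadata first and then rewinds (or buffers) the stream for iterhunks.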
1830 def iterhunks(fp):
1831 def iterhunks(fp):
1831 """Read a patch and yield the following events:
1832 """Read a patch and yield the following events:
1832 - ("file", afile, bfile, firsthunk): select a new target file.
1833 - ("file", afile, bfile, firsthunk): select a new target file.
1833 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1834 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1834 "file" event.
1835 "file" event.
1835 - ("git", gitchanges): current diff is in git format, gitchanges
1836 - ("git", gitchanges): current diff is in git format, gitchanges
1836 maps filenames to gitpatch records. Unique event.
1837 maps filenames to gitpatch records. Unique event.
1837 """
1838 """
1838 afile = ""
1839 afile = ""
1839 bfile = ""
1840 bfile = ""
1840 state = None
1841 state = None
1841 hunknum = 0
1842 hunknum = 0
1842 emitfile = newfile = False
1843 emitfile = newfile = False
1843 gitpatches = None
1844 gitpatches = None
1844
1845
1845 # our states
1846 # our states
1846 BFILE = 1
1847 BFILE = 1
1847 context = None
1848 context = None
1848 lr = linereader(fp)
1849 lr = linereader(fp)
1849
1850
1850 for x in iter(lr.readline, ''):
1851 for x in iter(lr.readline, ''):
1851 if state == BFILE and (
1852 if state == BFILE and (
1852 (not context and x[0] == '@')
1853 (not context and x[0] == '@')
1853 or (context is not False and x.startswith('***************'))
1854 or (context is not False and x.startswith('***************'))
1854 or x.startswith('GIT binary patch')):
1855 or x.startswith('GIT binary patch')):
1855 gp = None
1856 gp = None
1856 if (gitpatches and
1857 if (gitpatches and
1857 gitpatches[-1].ispatching(afile, bfile)):
1858 gitpatches[-1].ispatching(afile, bfile)):
1858 gp = gitpatches.pop()
1859 gp = gitpatches.pop()
1859 if x.startswith('GIT binary patch'):
1860 if x.startswith('GIT binary patch'):
1860 h = binhunk(lr, gp.path)
1861 h = binhunk(lr, gp.path)
1861 else:
1862 else:
1862 if context is None and x.startswith('***************'):
1863 if context is None and x.startswith('***************'):
1863 context = True
1864 context = True
1864 h = hunk(x, hunknum + 1, lr, context)
1865 h = hunk(x, hunknum + 1, lr, context)
1865 hunknum += 1
1866 hunknum += 1
1866 if emitfile:
1867 if emitfile:
1867 emitfile = False
1868 emitfile = False
1868 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1869 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1869 yield 'hunk', h
1870 yield 'hunk', h
1870 elif x.startswith('diff --git a/'):
1871 elif x.startswith('diff --git a/'):
1871 m = gitre.match(x.rstrip(' \r\n'))
1872 m = gitre.match(x.rstrip(' \r\n'))
1872 if not m:
1873 if not m:
1873 continue
1874 continue
1874 if gitpatches is None:
1875 if gitpatches is None:
1875 # scan whole input for git metadata
1876 # scan whole input for git metadata
1876 gitpatches = scangitpatch(lr, x)
1877 gitpatches = scangitpatch(lr, x)
1877 yield 'git', [g.copy() for g in gitpatches
1878 yield 'git', [g.copy() for g in gitpatches
1878 if g.op in ('COPY', 'RENAME')]
1879 if g.op in ('COPY', 'RENAME')]
1879 gitpatches.reverse()
1880 gitpatches.reverse()
1880 afile = 'a/' + m.group(1)
1881 afile = 'a/' + m.group(1)
1881 bfile = 'b/' + m.group(2)
1882 bfile = 'b/' + m.group(2)
1882 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1883 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1883 gp = gitpatches.pop()
1884 gp = gitpatches.pop()
1884 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1885 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1885 if not gitpatches:
1886 if not gitpatches:
1886 raise PatchError(_('failed to synchronize metadata for "%s"')
1887 raise PatchError(_('failed to synchronize metadata for "%s"')
1887 % afile[2:])
1888 % afile[2:])
1888 gp = gitpatches[-1]
1889 gp = gitpatches[-1]
1889 newfile = True
1890 newfile = True
1890 elif x.startswith('---'):
1891 elif x.startswith('---'):
1891 # check for a unified diff
1892 # check for a unified diff
1892 l2 = lr.readline()
1893 l2 = lr.readline()
1893 if not l2.startswith('+++'):
1894 if not l2.startswith('+++'):
1894 lr.push(l2)
1895 lr.push(l2)
1895 continue
1896 continue
1896 newfile = True
1897 newfile = True
1897 context = False
1898 context = False
1898 afile = parsefilename(x)
1899 afile = parsefilename(x)
1899 bfile = parsefilename(l2)
1900 bfile = parsefilename(l2)
1900 elif x.startswith('***'):
1901 elif x.startswith('***'):
1901 # check for a context diff
1902 # check for a context diff
1902 l2 = lr.readline()
1903 l2 = lr.readline()
1903 if not l2.startswith('---'):
1904 if not l2.startswith('---'):
1904 lr.push(l2)
1905 lr.push(l2)
1905 continue
1906 continue
1906 l3 = lr.readline()
1907 l3 = lr.readline()
1907 lr.push(l3)
1908 lr.push(l3)
1908 if not l3.startswith("***************"):
1909 if not l3.startswith("***************"):
1909 lr.push(l2)
1910 lr.push(l2)
1910 continue
1911 continue
1911 newfile = True
1912 newfile = True
1912 context = True
1913 context = True
1913 afile = parsefilename(x)
1914 afile = parsefilename(x)
1914 bfile = parsefilename(l2)
1915 bfile = parsefilename(l2)
1915
1916
1916 if newfile:
1917 if newfile:
1917 newfile = False
1918 newfile = False
1918 emitfile = True
1919 emitfile = True
1919 state = BFILE
1920 state = BFILE
1920 hunknum = 0
1921 hunknum = 0
1921
1922
1922 while gitpatches:
1923 while gitpatches:
1923 gp = gitpatches.pop()
1924 gp = gitpatches.pop()
1924 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1925 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1925
1926
1926 def applybindelta(binchunk, data):
1927 def applybindelta(binchunk, data):
1927 """Apply a binary delta hunk
1928 """Apply a binary delta hunk
1928 The algorithm used is the one from git's patch-delta.c.
1929 The algorithm used is the one from git's patch-delta.c.
1929 """
1930 """
1930 def deltahead(binchunk):
1931 def deltahead(binchunk):
1931 i = 0
1932 i = 0
1932 for c in binchunk:
1933 for c in binchunk:
1933 i += 1
1934 i += 1
1934 if not (ord(c) & 0x80):
1935 if not (ord(c) & 0x80):
1935 return i
1936 return i
1936 return i
1937 return i
1937 out = ""
1938 out = ""
1938 s = deltahead(binchunk)
1939 s = deltahead(binchunk)
1939 binchunk = binchunk[s:]
1940 binchunk = binchunk[s:]
1940 s = deltahead(binchunk)
1941 s = deltahead(binchunk)
1941 binchunk = binchunk[s:]
1942 binchunk = binchunk[s:]
1942 i = 0
1943 i = 0
1943 while i < len(binchunk):
1944 while i < len(binchunk):
1944 cmd = ord(binchunk[i])
1945 cmd = ord(binchunk[i])
1945 i += 1
1946 i += 1
1946 if (cmd & 0x80):
1947 if (cmd & 0x80):
1947 offset = 0
1948 offset = 0
1948 size = 0
1949 size = 0
1949 if (cmd & 0x01):
1950 if (cmd & 0x01):
1950 offset = ord(binchunk[i])
1951 offset = ord(binchunk[i])
1951 i += 1
1952 i += 1
1952 if (cmd & 0x02):
1953 if (cmd & 0x02):
1953 offset |= ord(binchunk[i]) << 8
1954 offset |= ord(binchunk[i]) << 8
1954 i += 1
1955 i += 1
1955 if (cmd & 0x04):
1956 if (cmd & 0x04):
1956 offset |= ord(binchunk[i]) << 16
1957 offset |= ord(binchunk[i]) << 16
1957 i += 1
1958 i += 1
1958 if (cmd & 0x08):
1959 if (cmd & 0x08):
1959 offset |= ord(binchunk[i]) << 24
1960 offset |= ord(binchunk[i]) << 24
1960 i += 1
1961 i += 1
1961 if (cmd & 0x10):
1962 if (cmd & 0x10):
1962 size = ord(binchunk[i])
1963 size = ord(binchunk[i])
1963 i += 1
1964 i += 1
1964 if (cmd & 0x20):
1965 if (cmd & 0x20):
1965 size |= ord(binchunk[i]) << 8
1966 size |= ord(binchunk[i]) << 8
1966 i += 1
1967 i += 1
1967 if (cmd & 0x40):
1968 if (cmd & 0x40):
1968 size |= ord(binchunk[i]) << 16
1969 size |= ord(binchunk[i]) << 16
1969 i += 1
1970 i += 1
1970 if size == 0:
1971 if size == 0:
1971 size = 0x10000
1972 size = 0x10000
1972 offset_end = offset + size
1973 offset_end = offset + size
1973 out += data[offset:offset_end]
1974 out += data[offset:offset_end]
1974 elif cmd != 0:
1975 elif cmd != 0:
1975 offset_end = i + cmd
1976 offset_end = i + cmd
1976 out += binchunk[i:offset_end]
1977 out += binchunk[i:offset_end]
1977 i += cmd
1978 i += cmd
1978 else:
1979 else:
1979 raise PatchError(_('unexpected delta opcode 0'))
1980 raise PatchError(_('unexpected delta opcode 0'))
1980 return out
1981 return out
1981
1982
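# A small worked example of the delta format consumed above (illustrative
# helper only; byte values follow git's patch-delta.c conventions): the
# delta starts with two varint sizes, then opcodes. An opcode with the high
# bit set copies a range from the source, its low bits selecting which
# offset/size bytes follow; a low opcode value n inserts the next n literal
# bytes.
def _applybindeltademo():
    source = 'the quick brown fox'
    delta = ('\x13'          # source size: 19
             '\x09'          # result size: 9
             '\x91\x04\x06'  # copy: 0x80|0x01|0x10, offset=4, size=6 -> 'quick '
             '\x03fox')      # insert 3 literal bytes: 'fox'
    # returns 'quick fox'
    return applybindelta(delta, source)
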
1982 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1983 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
1983 """Reads a patch from fp and tries to apply it.
1984 """Reads a patch from fp and tries to apply it.
1984
1985
1985 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1986 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
1986 there was any fuzz.
1987 there was any fuzz.
1987
1988
1988 If 'eolmode' is 'strict', the patch content and patched file are
1989 If 'eolmode' is 'strict', the patch content and patched file are
1989 read in binary mode. Otherwise, line endings are ignored when
1990 read in binary mode. Otherwise, line endings are ignored when
1990 patching and then normalized according to 'eolmode'.
1991 patching and then normalized according to 'eolmode'.
1991 """
1992 """
1992 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1993 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
1993 prefix=prefix, eolmode=eolmode)
1994 prefix=prefix, eolmode=eolmode)
1994
1995
1995 def _canonprefix(repo, prefix):
1996 def _canonprefix(repo, prefix):
1996 if prefix:
1997 if prefix:
1997 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
1998 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
1998 if prefix != '':
1999 if prefix != '':
1999 prefix += '/'
2000 prefix += '/'
2000 return prefix
2001 return prefix
2001
2002
2002 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2003 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2003 eolmode='strict'):
2004 eolmode='strict'):
2004 prefix = _canonprefix(backend.repo, prefix)
2005 prefix = _canonprefix(backend.repo, prefix)
2005 def pstrip(p):
2006 def pstrip(p):
2006 return pathtransform(p, strip - 1, prefix)[1]
2007 return pathtransform(p, strip - 1, prefix)[1]
2007
2008
2008 rejects = 0
2009 rejects = 0
2009 err = 0
2010 err = 0
2010 current_file = None
2011 current_file = None
2011
2012
2012 for state, values in iterhunks(fp):
2013 for state, values in iterhunks(fp):
2013 if state == 'hunk':
2014 if state == 'hunk':
2014 if not current_file:
2015 if not current_file:
2015 continue
2016 continue
2016 ret = current_file.apply(values)
2017 ret = current_file.apply(values)
2017 if ret > 0:
2018 if ret > 0:
2018 err = 1
2019 err = 1
2019 elif state == 'file':
2020 elif state == 'file':
2020 if current_file:
2021 if current_file:
2021 rejects += current_file.close()
2022 rejects += current_file.close()
2022 current_file = None
2023 current_file = None
2023 afile, bfile, first_hunk, gp = values
2024 afile, bfile, first_hunk, gp = values
2024 if gp:
2025 if gp:
2025 gp.path = pstrip(gp.path)
2026 gp.path = pstrip(gp.path)
2026 if gp.oldpath:
2027 if gp.oldpath:
2027 gp.oldpath = pstrip(gp.oldpath)
2028 gp.oldpath = pstrip(gp.oldpath)
2028 else:
2029 else:
2029 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2030 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2030 prefix)
2031 prefix)
2031 if gp.op == 'RENAME':
2032 if gp.op == 'RENAME':
2032 backend.unlink(gp.oldpath)
2033 backend.unlink(gp.oldpath)
2033 if not first_hunk:
2034 if not first_hunk:
2034 if gp.op == 'DELETE':
2035 if gp.op == 'DELETE':
2035 backend.unlink(gp.path)
2036 backend.unlink(gp.path)
2036 continue
2037 continue
2037 data, mode = None, None
2038 data, mode = None, None
2038 if gp.op in ('RENAME', 'COPY'):
2039 if gp.op in ('RENAME', 'COPY'):
2039 data, mode = store.getfile(gp.oldpath)[:2]
2040 data, mode = store.getfile(gp.oldpath)[:2]
2040 if data is None:
2041 if data is None:
2041 # This means that the old path does not exist
2042 # This means that the old path does not exist
2042 raise PatchError(_("source file '%s' does not exist")
2043 raise PatchError(_("source file '%s' does not exist")
2043 % gp.oldpath)
2044 % gp.oldpath)
2044 if gp.mode:
2045 if gp.mode:
2045 mode = gp.mode
2046 mode = gp.mode
2046 if gp.op == 'ADD':
2047 if gp.op == 'ADD':
2047 # Added files without content have no hunk and
2048 # Added files without content have no hunk and
2048 # must be created
2049 # must be created
2049 data = ''
2050 data = ''
2050 if data or mode:
2051 if data or mode:
2051 if (gp.op in ('ADD', 'RENAME', 'COPY')
2052 if (gp.op in ('ADD', 'RENAME', 'COPY')
2052 and backend.exists(gp.path)):
2053 and backend.exists(gp.path)):
2053 raise PatchError(_("cannot create %s: destination "
2054 raise PatchError(_("cannot create %s: destination "
2054 "already exists") % gp.path)
2055 "already exists") % gp.path)
2055 backend.setfile(gp.path, data, mode, gp.oldpath)
2056 backend.setfile(gp.path, data, mode, gp.oldpath)
2056 continue
2057 continue
2057 try:
2058 try:
2058 current_file = patcher(ui, gp, backend, store,
2059 current_file = patcher(ui, gp, backend, store,
2059 eolmode=eolmode)
2060 eolmode=eolmode)
2060 except PatchError as inst:
2061 except PatchError as inst:
2061 ui.warn(str(inst) + '\n')
2062 ui.warn(str(inst) + '\n')
2062 current_file = None
2063 current_file = None
2063 rejects += 1
2064 rejects += 1
2064 continue
2065 continue
2065 elif state == 'git':
2066 elif state == 'git':
2066 for gp in values:
2067 for gp in values:
2067 path = pstrip(gp.oldpath)
2068 path = pstrip(gp.oldpath)
2068 data, mode = backend.getfile(path)
2069 data, mode = backend.getfile(path)
2069 if data is None:
2070 if data is None:
2070 # The error ignored here will trigger a getfile()
2071 # The error ignored here will trigger a getfile()
2071 # error in a place more appropriate for error
2072 # error in a place more appropriate for error
2072 # handling, and will not interrupt the patching
2073 # handling, and will not interrupt the patching
2073 # process.
2074 # process.
2074 pass
2075 pass
2075 else:
2076 else:
2076 store.setfile(path, data, mode)
2077 store.setfile(path, data, mode)
2077 else:
2078 else:
2078 raise error.Abort(_('unsupported parser state: %s') % state)
2079 raise error.Abort(_('unsupported parser state: %s') % state)
2079
2080
2080 if current_file:
2081 if current_file:
2081 rejects += current_file.close()
2082 rejects += current_file.close()
2082
2083
2083 if rejects:
2084 if rejects:
2084 return -1
2085 return -1
2085 return err
2086 return err
2086
2087
2087 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2088 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2088 similarity):
2089 similarity):
2089 """use <patcher> to apply <patchname> to the working directory.
2090 """use <patcher> to apply <patchname> to the working directory.
2090 returns whether patch was applied with fuzz factor."""
2091 returns whether patch was applied with fuzz factor."""
2091
2092
2092 fuzz = False
2093 fuzz = False
2093 args = []
2094 args = []
2094 cwd = repo.root
2095 cwd = repo.root
2095 if cwd:
2096 if cwd:
2096 args.append('-d %s' % util.shellquote(cwd))
2097 args.append('-d %s' % util.shellquote(cwd))
2097 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2098 fp = util.popen('%s %s -p%d < %s' % (patcher, ' '.join(args), strip,
2098 util.shellquote(patchname)))
2099 util.shellquote(patchname)))
2099 try:
2100 try:
2100 for line in util.iterfile(fp):
2101 for line in util.iterfile(fp):
2101 line = line.rstrip()
2102 line = line.rstrip()
2102 ui.note(line + '\n')
2103 ui.note(line + '\n')
2103 if line.startswith('patching file '):
2104 if line.startswith('patching file '):
2104 pf = util.parsepatchoutput(line)
2105 pf = util.parsepatchoutput(line)
2105 printed_file = False
2106 printed_file = False
2106 files.add(pf)
2107 files.add(pf)
2107 elif line.find('with fuzz') >= 0:
2108 elif line.find('with fuzz') >= 0:
2108 fuzz = True
2109 fuzz = True
2109 if not printed_file:
2110 if not printed_file:
2110 ui.warn(pf + '\n')
2111 ui.warn(pf + '\n')
2111 printed_file = True
2112 printed_file = True
2112 ui.warn(line + '\n')
2113 ui.warn(line + '\n')
2113 elif line.find('saving rejects to file') >= 0:
2114 elif line.find('saving rejects to file') >= 0:
2114 ui.warn(line + '\n')
2115 ui.warn(line + '\n')
2115 elif line.find('FAILED') >= 0:
2116 elif line.find('FAILED') >= 0:
2116 if not printed_file:
2117 if not printed_file:
2117 ui.warn(pf + '\n')
2118 ui.warn(pf + '\n')
2118 printed_file = True
2119 printed_file = True
2119 ui.warn(line + '\n')
2120 ui.warn(line + '\n')
2120 finally:
2121 finally:
2121 if files:
2122 if files:
2122 scmutil.marktouched(repo, files, similarity)
2123 scmutil.marktouched(repo, files, similarity)
2123 code = fp.close()
2124 code = fp.close()
2124 if code:
2125 if code:
2125 raise PatchError(_("patch command failed: %s") %
2126 raise PatchError(_("patch command failed: %s") %
2126 util.explainexit(code)[0])
2127 util.explainexit(code)[0])
2127 return fuzz
2128 return fuzz
2128
2129
2129 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2130 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2130 eolmode='strict'):
2131 eolmode='strict'):
2131 if files is None:
2132 if files is None:
2132 files = set()
2133 files = set()
2133 if eolmode is None:
2134 if eolmode is None:
2134 eolmode = ui.config('patch', 'eol')
2135 eolmode = ui.config('patch', 'eol')
2135 if eolmode.lower() not in eolmodes:
2136 if eolmode.lower() not in eolmodes:
2136 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2137 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2137 eolmode = eolmode.lower()
2138 eolmode = eolmode.lower()
2138
2139
2139 store = filestore()
2140 store = filestore()
2140 try:
2141 try:
2141 fp = open(patchobj, 'rb')
2142 fp = open(patchobj, 'rb')
2142 except TypeError:
2143 except TypeError:
2143 fp = patchobj
2144 fp = patchobj
2144 try:
2145 try:
2145 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2146 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2146 eolmode=eolmode)
2147 eolmode=eolmode)
2147 finally:
2148 finally:
2148 if fp != patchobj:
2149 if fp != patchobj:
2149 fp.close()
2150 fp.close()
2150 files.update(backend.close())
2151 files.update(backend.close())
2151 store.close()
2152 store.close()
2152 if ret < 0:
2153 if ret < 0:
2153 raise PatchError(_('patch failed to apply'))
2154 raise PatchError(_('patch failed to apply'))
2154 return ret > 0
2155 return ret > 0
2155
2156
2156 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2157 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2157 eolmode='strict', similarity=0):
2158 eolmode='strict', similarity=0):
2158 """use builtin patch to apply <patchobj> to the working directory.
2159 """use builtin patch to apply <patchobj> to the working directory.
2159 returns whether patch was applied with fuzz factor."""
2160 returns whether patch was applied with fuzz factor."""
2160 backend = workingbackend(ui, repo, similarity)
2161 backend = workingbackend(ui, repo, similarity)
2161 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2162 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2162
2163
2163 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2164 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2164 eolmode='strict'):
2165 eolmode='strict'):
2165 backend = repobackend(ui, repo, ctx, store)
2166 backend = repobackend(ui, repo, ctx, store)
2166 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2167 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2167
2168
2168 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2169 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2169 similarity=0):
2170 similarity=0):
2170 """Apply <patchname> to the working directory.
2171 """Apply <patchname> to the working directory.
2171
2172
2172 'eolmode' specifies how end of lines should be handled. It can be:
2173 'eolmode' specifies how end of lines should be handled. It can be:
2173 - 'strict': inputs are read in binary mode, EOLs are preserved
2174 - 'strict': inputs are read in binary mode, EOLs are preserved
2174 - 'crlf': EOLs are ignored when patching and reset to CRLF
2175 - 'crlf': EOLs are ignored when patching and reset to CRLF
2175 - 'lf': EOLs are ignored when patching and reset to LF
2176 - 'lf': EOLs are ignored when patching and reset to LF
2176 - None: get it from user settings, default to 'strict'
2177 - None: get it from user settings, default to 'strict'
2177 'eolmode' is ignored when using an external patcher program.
2178 'eolmode' is ignored when using an external patcher program.
2178
2179
2179 Returns whether patch was applied with fuzz factor.
2180 Returns whether patch was applied with fuzz factor.
2180 """
2181 """
2181 patcher = ui.config('ui', 'patch')
2182 patcher = ui.config('ui', 'patch')
2182 if files is None:
2183 if files is None:
2183 files = set()
2184 files = set()
2184 if patcher:
2185 if patcher:
2185 return _externalpatch(ui, repo, patcher, patchname, strip,
2186 return _externalpatch(ui, repo, patcher, patchname, strip,
2186 files, similarity)
2187 files, similarity)
2187 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2188 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2188 similarity)
2189 similarity)
2189
2190
2190 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2191 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2191 backend = fsbackend(ui, repo.root)
2192 backend = fsbackend(ui, repo.root)
2192 prefix = _canonprefix(repo, prefix)
2193 prefix = _canonprefix(repo, prefix)
2193 with open(patchpath, 'rb') as fp:
2194 with open(patchpath, 'rb') as fp:
2194 changed = set()
2195 changed = set()
2195 for state, values in iterhunks(fp):
2196 for state, values in iterhunks(fp):
2196 if state == 'file':
2197 if state == 'file':
2197 afile, bfile, first_hunk, gp = values
2198 afile, bfile, first_hunk, gp = values
2198 if gp:
2199 if gp:
2199 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2200 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2200 if gp.oldpath:
2201 if gp.oldpath:
2201 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2202 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2202 prefix)[1]
2203 prefix)[1]
2203 else:
2204 else:
2204 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2205 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2205 prefix)
2206 prefix)
2206 changed.add(gp.path)
2207 changed.add(gp.path)
2207 if gp.op == 'RENAME':
2208 if gp.op == 'RENAME':
2208 changed.add(gp.oldpath)
2209 changed.add(gp.oldpath)
2209 elif state not in ('hunk', 'git'):
2210 elif state not in ('hunk', 'git'):
2210 raise error.Abort(_('unsupported parser state: %s') % state)
2211 raise error.Abort(_('unsupported parser state: %s') % state)
2211 return changed
2212 return changed
2212
2213
2213 class GitDiffRequired(Exception):
2214 class GitDiffRequired(Exception):
2214 pass
2215 pass
2215
2216
2216 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2217 def diffallopts(ui, opts=None, untrusted=False, section='diff'):
2217 '''return diffopts with all features supported and parsed'''
2218 '''return diffopts with all features supported and parsed'''
2218 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2219 return difffeatureopts(ui, opts=opts, untrusted=untrusted, section=section,
2219 git=True, whitespace=True, formatchanging=True)
2220 git=True, whitespace=True, formatchanging=True)
2220
2221
2221 diffopts = diffallopts
2222 diffopts = diffallopts
2222
2223
2223 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2224 def difffeatureopts(ui, opts=None, untrusted=False, section='diff', git=False,
2224 whitespace=False, formatchanging=False):
2225 whitespace=False, formatchanging=False):
2225 '''return diffopts with only opted-in features parsed
2226 '''return diffopts with only opted-in features parsed
2226
2227
2227 Features:
2228 Features:
2228 - git: git-style diffs
2229 - git: git-style diffs
2229 - whitespace: whitespace options like ignoreblanklines and ignorews
2230 - whitespace: whitespace options like ignoreblanklines and ignorews
2230 - formatchanging: options that will likely break or cause correctness issues
2231 - formatchanging: options that will likely break or cause correctness issues
2231 with most diff parsers
2232 with most diff parsers
2232 '''
2233 '''
2233 def get(key, name=None, getter=ui.configbool, forceplain=None):
2234 def get(key, name=None, getter=ui.configbool, forceplain=None):
2234 if opts:
2235 if opts:
2235 v = opts.get(key)
2236 v = opts.get(key)
2236 # diffopts flags are either None-default (which is passed
2237 # diffopts flags are either None-default (which is passed
2237 # through unchanged, so we can identify unset values), or
2238 # through unchanged, so we can identify unset values), or
2238 # some other falsey default (eg --unified, which defaults
2239 # some other falsey default (eg --unified, which defaults
2239 # to an empty string). We only want to override the config
2240 # to an empty string). We only want to override the config
2240 # entries from hgrc with command line values if they
2241 # entries from hgrc with command line values if they
2241 # appear to have been set, which is any truthy value,
2242 # appear to have been set, which is any truthy value,
2242 # True, or False.
2243 # True, or False.
2243 if v or isinstance(v, bool):
2244 if v or isinstance(v, bool):
2244 return v
2245 return v
2245 if forceplain is not None and ui.plain():
2246 if forceplain is not None and ui.plain():
2246 return forceplain
2247 return forceplain
2247 return getter(section, name or key, untrusted=untrusted)
2248 return getter(section, name or key, untrusted=untrusted)
2248
2249
2249 # core options, expected to be understood by every diff parser
2250 # core options, expected to be understood by every diff parser
2250 buildopts = {
2251 buildopts = {
2251 'nodates': get('nodates'),
2252 'nodates': get('nodates'),
2252 'showfunc': get('show_function', 'showfunc'),
2253 'showfunc': get('show_function', 'showfunc'),
2253 'context': get('unified', getter=ui.config),
2254 'context': get('unified', getter=ui.config),
2254 }
2255 }
2256 buildopts['worddiff'] = ui.configbool('experimental', 'worddiff')
2255
2257
2256 if git:
2258 if git:
2257 buildopts['git'] = get('git')
2259 buildopts['git'] = get('git')
2258
2260
2259 # since this is in the experimental section, we need to call
2261 # since this is in the experimental section, we need to call
2260 # ui.configbool directly
2262 # ui.configbool directly
2261 buildopts['showsimilarity'] = ui.configbool('experimental',
2263 buildopts['showsimilarity'] = ui.configbool('experimental',
2262 'extendedheader.similarity')
2264 'extendedheader.similarity')
2263
2265
2264 # need to inspect the ui object instead of using get() since we want to
2266 # need to inspect the ui object instead of using get() since we want to
2265 # test for an int
2267 # test for an int
2266 hconf = ui.config('experimental', 'extendedheader.index')
2268 hconf = ui.config('experimental', 'extendedheader.index')
2267 if hconf is not None:
2269 if hconf is not None:
2268 hlen = None
2270 hlen = None
2269 try:
2271 try:
2270 # the hash config could be an integer (for length of hash) or a
2272 # the hash config could be an integer (for length of hash) or a
2271 # word (e.g. short, full, none)
2273 # word (e.g. short, full, none)
2272 hlen = int(hconf)
2274 hlen = int(hconf)
2273 if hlen < 0 or hlen > 40:
2275 if hlen < 0 or hlen > 40:
2274 msg = _("invalid length for extendedheader.index: '%d'\n")
2276 msg = _("invalid length for extendedheader.index: '%d'\n")
2275 ui.warn(msg % hlen)
2277 ui.warn(msg % hlen)
2276 except ValueError:
2278 except ValueError:
2277 # default value
2279 # default value
2278 if hconf == 'short' or hconf == '':
2280 if hconf == 'short' or hconf == '':
2279 hlen = 12
2281 hlen = 12
2280 elif hconf == 'full':
2282 elif hconf == 'full':
2281 hlen = 40
2283 hlen = 40
2282 elif hconf != 'none':
2284 elif hconf != 'none':
2283 msg = _("invalid value for extendedheader.index: '%s'\n")
2285 msg = _("invalid value for extendedheader.index: '%s'\n")
2284 ui.warn(msg % hconf)
2286 ui.warn(msg % hconf)
2285 finally:
2287 finally:
2286 buildopts['index'] = hlen
2288 buildopts['index'] = hlen
2287
2289
2288 if whitespace:
2290 if whitespace:
2289 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2291 buildopts['ignorews'] = get('ignore_all_space', 'ignorews')
2290 buildopts['ignorewsamount'] = get('ignore_space_change',
2292 buildopts['ignorewsamount'] = get('ignore_space_change',
2291 'ignorewsamount')
2293 'ignorewsamount')
2292 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2294 buildopts['ignoreblanklines'] = get('ignore_blank_lines',
2293 'ignoreblanklines')
2295 'ignoreblanklines')
2294 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2296 buildopts['ignorewseol'] = get('ignore_space_at_eol', 'ignorewseol')
2295 if formatchanging:
2297 if formatchanging:
2296 buildopts['text'] = opts and opts.get('text')
2298 buildopts['text'] = opts and opts.get('text')
2297 binary = None if opts is None else opts.get('binary')
2299 binary = None if opts is None else opts.get('binary')
2298 buildopts['nobinary'] = (not binary if binary is not None
2300 buildopts['nobinary'] = (not binary if binary is not None
2299 else get('nobinary', forceplain=False))
2301 else get('nobinary', forceplain=False))
2300 buildopts['noprefix'] = get('noprefix', forceplain=False)
2302 buildopts['noprefix'] = get('noprefix', forceplain=False)
2301
2303
2302 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2304 return mdiff.diffopts(**pycompat.strkwargs(buildopts))
2303
2305
2304 def diff(repo, node1=None, node2=None, match=None, changes=None,
2306 def diff(repo, node1=None, node2=None, match=None, changes=None,
2305 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2307 opts=None, losedatafn=None, prefix='', relroot='', copy=None,
2306 hunksfilterfn=None):
2308 hunksfilterfn=None):
2307 '''yields diff of changes to files between two nodes, or node and
2309 '''yields diff of changes to files between two nodes, or node and
2308 working directory.
2310 working directory.
2309
2311
2310 if node1 is None, use first dirstate parent instead.
2312 if node1 is None, use first dirstate parent instead.
2311 if node2 is None, compare node1 with working directory.
2313 if node2 is None, compare node1 with working directory.
2312
2314
2313 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2315 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2314 every time some change cannot be represented with the current
2316 every time some change cannot be represented with the current
2315 patch format. Return False to upgrade to git patch format, True to
2317 patch format. Return False to upgrade to git patch format, True to
2316 accept the loss or raise an exception to abort the diff. It is
2318 accept the loss or raise an exception to abort the diff. It is
2317 called with the name of current file being diffed as 'fn'. If set
2319 called with the name of current file being diffed as 'fn'. If set
2318 to None, patches will always be upgraded to git format when
2320 to None, patches will always be upgraded to git format when
2319 necessary.
2321 necessary.
2320
2322
2321 prefix is a filename prefix that is prepended to all filenames on
2323 prefix is a filename prefix that is prepended to all filenames on
2322 display (used for subrepos).
2324 display (used for subrepos).
2323
2325
2324 relroot, if not empty, must be normalized with a trailing /. Any match
2326 relroot, if not empty, must be normalized with a trailing /. Any match
2325 patterns that fall outside it will be ignored.
2327 patterns that fall outside it will be ignored.
2326
2328
2327 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2329 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2328 information.
2330 information.
2329
2331
2330 hunksfilterfn, if not None, should be a function taking a filectx and
2332 hunksfilterfn, if not None, should be a function taking a filectx and
2331 hunks generator that may yield filtered hunks.
2333 hunks generator that may yield filtered hunks.
2332 '''
2334 '''
2333 for fctx1, fctx2, hdr, hunks in diffhunks(
2335 for fctx1, fctx2, hdr, hunks in diffhunks(
2334 repo, node1=node1, node2=node2,
2336 repo, node1=node1, node2=node2,
2335 match=match, changes=changes, opts=opts,
2337 match=match, changes=changes, opts=opts,
2336 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2338 losedatafn=losedatafn, prefix=prefix, relroot=relroot, copy=copy,
2337 ):
2339 ):
2338 if hunksfilterfn is not None:
2340 if hunksfilterfn is not None:
2339 # If the file has been removed, fctx2 is None; but this should
2341 # If the file has been removed, fctx2 is None; but this should
2340 # not occur here since we catch removed files early in
2342 # not occur here since we catch removed files early in
2341 # cmdutil.getloglinerangerevs() for 'hg log -L'.
2343 # cmdutil.getloglinerangerevs() for 'hg log -L'.
2342 assert fctx2 is not None, \
2344 assert fctx2 is not None, \
2343 'fctx2 unexpectedly None in diff hunks filtering'
2345 'fctx2 unexpectedly None in diff hunks filtering'
2344 hunks = hunksfilterfn(fctx2, hunks)
2346 hunks = hunksfilterfn(fctx2, hunks)
2345 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2347 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2346 if hdr and (text or len(hdr) > 1):
2348 if hdr and (text or len(hdr) > 1):
2347 yield '\n'.join(hdr) + '\n'
2349 yield '\n'.join(hdr) + '\n'
2348 if text:
2350 if text:
2349 yield text
2351 yield text
2350
2352
2351 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2353 def diffhunks(repo, node1=None, node2=None, match=None, changes=None,
2352 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2354 opts=None, losedatafn=None, prefix='', relroot='', copy=None):
2353 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2355 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2354 where `header` is a list of diff headers and `hunks` is an iterable of
2356 where `header` is a list of diff headers and `hunks` is an iterable of
2355 (`hunkrange`, `hunklines`) tuples.
2357 (`hunkrange`, `hunklines`) tuples.
2356
2358
2357 See diff() for the meaning of parameters.
2359 See diff() for the meaning of parameters.
2358 """
2360 """
2359
2361
2360 if opts is None:
2362 if opts is None:
2361 opts = mdiff.defaultopts
2363 opts = mdiff.defaultopts
2362
2364
2363 if not node1 and not node2:
2365 if not node1 and not node2:
2364 node1 = repo.dirstate.p1()
2366 node1 = repo.dirstate.p1()
2365
2367
2366 def lrugetfilectx():
2368 def lrugetfilectx():
2367 cache = {}
2369 cache = {}
2368 order = collections.deque()
2370 order = collections.deque()
2369 def getfilectx(f, ctx):
2371 def getfilectx(f, ctx):
2370 fctx = ctx.filectx(f, filelog=cache.get(f))
2372 fctx = ctx.filectx(f, filelog=cache.get(f))
2371 if f not in cache:
2373 if f not in cache:
2372 if len(cache) > 20:
2374 if len(cache) > 20:
2373 del cache[order.popleft()]
2375 del cache[order.popleft()]
2374 cache[f] = fctx.filelog()
2376 cache[f] = fctx.filelog()
2375 else:
2377 else:
2376 order.remove(f)
2378 order.remove(f)
2377 order.append(f)
2379 order.append(f)
2378 return fctx
2380 return fctx
2379 return getfilectx
2381 return getfilectx
2380 getfilectx = lrugetfilectx()
2382 getfilectx = lrugetfilectx()
2381
2383
2382 ctx1 = repo[node1]
2384 ctx1 = repo[node1]
2383 ctx2 = repo[node2]
2385 ctx2 = repo[node2]
2384
2386
2385 relfiltered = False
2387 relfiltered = False
2386 if relroot != '' and match.always():
2388 if relroot != '' and match.always():
2387 # as a special case, create a new matcher with just the relroot
2389 # as a special case, create a new matcher with just the relroot
2388 pats = [relroot]
2390 pats = [relroot]
2389 match = scmutil.match(ctx2, pats, default='path')
2391 match = scmutil.match(ctx2, pats, default='path')
2390 relfiltered = True
2392 relfiltered = True
2391
2393
2392 if not changes:
2394 if not changes:
2393 changes = repo.status(ctx1, ctx2, match=match)
2395 changes = repo.status(ctx1, ctx2, match=match)
2394 modified, added, removed = changes[:3]
2396 modified, added, removed = changes[:3]
2395
2397
2396 if not modified and not added and not removed:
2398 if not modified and not added and not removed:
2397 return []
2399 return []
2398
2400
2399 if repo.ui.debugflag:
2401 if repo.ui.debugflag:
2400 hexfunc = hex
2402 hexfunc = hex
2401 else:
2403 else:
2402 hexfunc = short
2404 hexfunc = short
2403 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2405 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2404
2406
2405 if copy is None:
2407 if copy is None:
2406 copy = {}
2408 copy = {}
2407 if opts.git or opts.upgrade:
2409 if opts.git or opts.upgrade:
2408 copy = copies.pathcopies(ctx1, ctx2, match=match)
2410 copy = copies.pathcopies(ctx1, ctx2, match=match)
2409
2411
2410 if relroot is not None:
2412 if relroot is not None:
2411 if not relfiltered:
2413 if not relfiltered:
2412 # XXX this would ideally be done in the matcher, but that is
2414 # XXX this would ideally be done in the matcher, but that is
2413 # generally meant to 'or' patterns, not 'and' them. In this case we
2415 # generally meant to 'or' patterns, not 'and' them. In this case we
2414 # need to 'and' all the patterns from the matcher with relroot.
2416 # need to 'and' all the patterns from the matcher with relroot.
2415 def filterrel(l):
2417 def filterrel(l):
2416 return [f for f in l if f.startswith(relroot)]
2418 return [f for f in l if f.startswith(relroot)]
2417 modified = filterrel(modified)
2419 modified = filterrel(modified)
2418 added = filterrel(added)
2420 added = filterrel(added)
2419 removed = filterrel(removed)
2421 removed = filterrel(removed)
2420 relfiltered = True
2422 relfiltered = True
2421 # filter out copies where either side isn't inside the relative root
2423 # filter out copies where either side isn't inside the relative root
2422 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2424 copy = dict(((dst, src) for (dst, src) in copy.iteritems()
2423 if dst.startswith(relroot)
2425 if dst.startswith(relroot)
2424 and src.startswith(relroot)))
2426 and src.startswith(relroot)))
2425
2427
2426 modifiedset = set(modified)
2428 modifiedset = set(modified)
2427 addedset = set(added)
2429 addedset = set(added)
2428 removedset = set(removed)
2430 removedset = set(removed)
2429 for f in modified:
2431 for f in modified:
2430 if f not in ctx1:
2432 if f not in ctx1:
2431 # Fix up added, since merged-in additions appear as
2433 # Fix up added, since merged-in additions appear as
2432 # modifications during merges
2434 # modifications during merges
2433 modifiedset.remove(f)
2435 modifiedset.remove(f)
2434 addedset.add(f)
2436 addedset.add(f)
2435 for f in removed:
2437 for f in removed:
2436 if f not in ctx1:
2438 if f not in ctx1:
2437 # Merged-in additions that are then removed are reported as removed.
2439 # Merged-in additions that are then removed are reported as removed.
2438 # They are not in ctx1, so we don't want to show them in the diff.
2440 # They are not in ctx1, so we don't want to show them in the diff.
2439 removedset.remove(f)
2441 removedset.remove(f)
2440 modified = sorted(modifiedset)
2442 modified = sorted(modifiedset)
2441 added = sorted(addedset)
2443 added = sorted(addedset)
2442 removed = sorted(removedset)
2444 removed = sorted(removedset)
2443 for dst, src in copy.items():
2445 for dst, src in copy.items():
2444 if src not in ctx1:
2446 if src not in ctx1:
2445 # Files merged in during a merge and then copied/renamed are
2447 # Files merged in during a merge and then copied/renamed are
2446 # reported as copies. We want to show them in the diff as additions.
2448 # reported as copies. We want to show them in the diff as additions.
2447 del copy[dst]
2449 del copy[dst]
2448
2450
2449 def difffn(opts, losedata):
2451 def difffn(opts, losedata):
2450 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2452 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2451 copy, getfilectx, opts, losedata, prefix, relroot)
2453 copy, getfilectx, opts, losedata, prefix, relroot)
2452 if opts.upgrade and not opts.git:
2454 if opts.upgrade and not opts.git:
2453 try:
2455 try:
2454 def losedata(fn):
2456 def losedata(fn):
2455 if not losedatafn or not losedatafn(fn=fn):
2457 if not losedatafn or not losedatafn(fn=fn):
2456 raise GitDiffRequired
2458 raise GitDiffRequired
2457 # Buffer the whole output until we are sure it can be generated
2459 # Buffer the whole output until we are sure it can be generated
2458 return list(difffn(opts.copy(git=False), losedata))
2460 return list(difffn(opts.copy(git=False), losedata))
2459 except GitDiffRequired:
2461 except GitDiffRequired:
2460 return difffn(opts.copy(git=True), None)
2462 return difffn(opts.copy(git=True), None)
2461 else:
2463 else:
2462 return difffn(opts, None)
2464 return difffn(opts, None)
2463
2465
2464 def difflabel(func, *args, **kw):
2466 def difflabel(func, *args, **kw):
2465 '''yields 2-tuples of (output, label) based on the output of func()'''
2467 '''yields 2-tuples of (output, label) based on the output of func()'''
2468 inlinecolor = False
2469 if kw.get('opts'):
2470 inlinecolor = kw['opts'].worddiff
2466 headprefixes = [('diff', 'diff.diffline'),
2471 headprefixes = [('diff', 'diff.diffline'),
2467 ('copy', 'diff.extended'),
2472 ('copy', 'diff.extended'),
2468 ('rename', 'diff.extended'),
2473 ('rename', 'diff.extended'),
2469 ('old', 'diff.extended'),
2474 ('old', 'diff.extended'),
2470 ('new', 'diff.extended'),
2475 ('new', 'diff.extended'),
2471 ('deleted', 'diff.extended'),
2476 ('deleted', 'diff.extended'),
2472 ('index', 'diff.extended'),
2477 ('index', 'diff.extended'),
2473 ('similarity', 'diff.extended'),
2478 ('similarity', 'diff.extended'),
2474 ('---', 'diff.file_a'),
2479 ('---', 'diff.file_a'),
2475 ('+++', 'diff.file_b')]
2480 ('+++', 'diff.file_b')]
2476 textprefixes = [('@', 'diff.hunk'),
2481 textprefixes = [('@', 'diff.hunk'),
2477 ('-', 'diff.deleted'),
2482 ('-', 'diff.deleted'),
2478 ('+', 'diff.inserted')]
2483 ('+', 'diff.inserted')]
2479 head = False
2484 head = False
2480 for chunk in func(*args, **kw):
2485 for chunk in func(*args, **kw):
2481 lines = chunk.split('\n')
2486 lines = chunk.split('\n')
2487 matches = {}
2488 if inlinecolor:
2489 matches = _findmatches(lines)
2482 for i, line in enumerate(lines):
2490 for i, line in enumerate(lines):
2483 if i != 0:
2491 if i != 0:
2484 yield ('\n', '')
2492 yield ('\n', '')
2485 if head:
2493 if head:
2486 if line.startswith('@'):
2494 if line.startswith('@'):
2487 head = False
2495 head = False
2488 else:
2496 else:
2489 if line and line[0] not in ' +-@\\':
2497 if line and line[0] not in ' +-@\\':
2490 head = True
2498 head = True
2491 stripline = line
2499 stripline = line
2492 diffline = False
2500 diffline = False
2493 if not head and line and line[0] in '+-':
2501 if not head and line and line[0] in '+-':
2494 # highlight tabs and trailing whitespace, but only in
2502 # highlight tabs and trailing whitespace, but only in
2495 # changed lines
2503 # changed lines
2496 stripline = line.rstrip()
2504 stripline = line.rstrip()
2497 diffline = True
2505 diffline = True
2498
2506
2499 prefixes = textprefixes
2507 prefixes = textprefixes
2500 if head:
2508 if head:
2501 prefixes = headprefixes
2509 prefixes = headprefixes
2502 for prefix, label in prefixes:
2510 for prefix, label in prefixes:
2503 if stripline.startswith(prefix):
2511 if stripline.startswith(prefix):
2504 if diffline:
2512 if diffline:
2505 for token in tabsplitter.findall(stripline):
2513 for token in tabsplitter.findall(stripline):
2506 if '\t' == token[0]:
2514 if '\t' == token[0]:
2507 yield (token, 'diff.tab')
2515 yield (token, 'diff.tab')
2508 else:
2516 else:
2509 yield (token, label)
2517 if i in matches:
2518 for l, t in _inlinediff(
2519 lines[i].rstrip(),
2520 lines[matches[i]].rstrip(),
2521 label):
2522 yield (t, l)
2523 else:
2524 yield (token, label)
2510 else:
2525 else:
2511 yield (stripline, label)
2526 yield (stripline, label)
2512 break
2527 break
2513 else:
2528 else:
2514 yield (line, '')
2529 yield (line, '')
2515 if line != stripline:
2530 if line != stripline:
2516 yield (line[len(stripline):], 'diff.trailingwhitespace')
2531 yield (line[len(stripline):], 'diff.trailingwhitespace')
2517
2532
2533 def _findmatches(slist):
2534 '''Look for insertions matching deletions and return a dict of
2535 correspondences.
2536 '''
2537 lastmatch = 0
2538 matches = {}
2539 for i, line in enumerate(slist):
2540 if line == '':
2541 continue
2542 if line[0] == '-':
2543 lastmatch = max(lastmatch, i)
2544 newgroup = False
2545 for j, newline in enumerate(slist[lastmatch + 1:]):
2546 if newline == '':
2547 continue
2548 if newline[0] == '-' and newgroup: # too far, no match
2549 break
2550 if newline[0] == '+': # potential match
2551 newgroup = True
2552 sim = difflib.SequenceMatcher(None, line, newline).ratio()
2553 if sim > 0.7:
2554 lastmatch = lastmatch + 1 + j
2555 matches[i] = lastmatch
2556 matches[lastmatch] = i
2557 break
2558 return matches
2559
2560 def _inlinediff(s1, s2, operation):
2561 '''Perform string diff to highlight specific changes.'''
2562 operation_skip = '+?' if operation == 'diff.deleted' else '-?'
2563 if operation == 'diff.deleted':
2564 s2, s1 = s1, s2
2565
2566 buff = []
2567 # we never want to highlight the leading +-
2568 if operation == 'diff.deleted' and s2.startswith('-'):
2569 label = operation
2570 token = '-'
2571 s2 = s2[1:]
2572 s1 = s1[1:]
2573 elif operation == 'diff.inserted' and s1.startswith('+'):
2574 label = operation
2575 token = '+'
2576 s2 = s2[1:]
2577 s1 = s1[1:]
2578
2579 s = difflib.ndiff(re.split(br'(\W)', s2), re.split(br'(\W)', s1))
2580 for part in s:
2581 if part[0] in operation_skip:
2582 continue
2583 l = operation + '.highlight'
2584 if part[0] in ' ':
2585 l = operation
2586 if l == label: # contiguous token with same label
2587 token += part[2:]
2588 continue
2589 else:
2590 buff.append((label, token))
2591 label = l
2592 token = part[2:]
2593 buff.append((label, token))
2594
2595 return buff
2596
2518 def diffui(*args, **kw):
2597 def diffui(*args, **kw):
2519 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2598 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2520 return difflabel(diff, *args, **kw)
2599 return difflabel(diff, *args, **kw)
2521
2600
2522 def _filepairs(modified, added, removed, copy, opts):
2601 def _filepairs(modified, added, removed, copy, opts):
2523 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2602 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2524 before and f2 is the name after. For added files, f1 will be None,
2603 before and f2 is the name after. For added files, f1 will be None,
2525 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2604 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2526 or 'rename' (the latter two only if opts.git is set).'''
2605 or 'rename' (the latter two only if opts.git is set).'''
2527 gone = set()
2606 gone = set()
2528
2607
2529 copyto = dict([(v, k) for k, v in copy.items()])
2608 copyto = dict([(v, k) for k, v in copy.items()])
2530
2609
2531 addedset, removedset = set(added), set(removed)
2610 addedset, removedset = set(added), set(removed)
2532
2611
2533 for f in sorted(modified + added + removed):
2612 for f in sorted(modified + added + removed):
2534 copyop = None
2613 copyop = None
2535 f1, f2 = f, f
2614 f1, f2 = f, f
2536 if f in addedset:
2615 if f in addedset:
2537 f1 = None
2616 f1 = None
2538 if f in copy:
2617 if f in copy:
2539 if opts.git:
2618 if opts.git:
2540 f1 = copy[f]
2619 f1 = copy[f]
2541 if f1 in removedset and f1 not in gone:
2620 if f1 in removedset and f1 not in gone:
2542 copyop = 'rename'
2621 copyop = 'rename'
2543 gone.add(f1)
2622 gone.add(f1)
2544 else:
2623 else:
2545 copyop = 'copy'
2624 copyop = 'copy'
2546 elif f in removedset:
2625 elif f in removedset:
2547 f2 = None
2626 f2 = None
2548 if opts.git:
2627 if opts.git:
2549 # have we already reported a copy above?
2628 # have we already reported a copy above?
2550 if (f in copyto and copyto[f] in addedset
2629 if (f in copyto and copyto[f] in addedset
2551 and copy[copyto[f]] == f):
2630 and copy[copyto[f]] == f):
2552 continue
2631 continue
2553 yield f1, f2, copyop
2632 yield f1, f2, copyop
2554
2633
2555 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2634 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2556 copy, getfilectx, opts, losedatafn, prefix, relroot):
2635 copy, getfilectx, opts, losedatafn, prefix, relroot):
2557 '''given input data, generate a diff and yield it in blocks
2636 '''given input data, generate a diff and yield it in blocks
2558
2637
2559 If generating a diff would lose data like flags or binary data and
2638 If generating a diff would lose data like flags or binary data and
2560 losedatafn is not None, it will be called.
2639 losedatafn is not None, it will be called.
2561
2640
2562 relroot is removed and prefix is added to every path in the diff output.
2641 relroot is removed and prefix is added to every path in the diff output.
2563
2642
2564 If relroot is not empty, this function expects every path in modified,
2643 If relroot is not empty, this function expects every path in modified,
2565 added, removed and copy to start with it.'''
2644 added, removed and copy to start with it.'''
2566
2645
2567 def gitindex(text):
2646 def gitindex(text):
2568 if not text:
2647 if not text:
2569 text = ""
2648 text = ""
2570 l = len(text)
2649 l = len(text)
2571 s = hashlib.sha1('blob %d\0' % l)
2650 s = hashlib.sha1('blob %d\0' % l)
2572 s.update(text)
2651 s.update(text)
2573 return s.hexdigest()
2652 return s.hexdigest()
2574
2653
2575 if opts.noprefix:
2654 if opts.noprefix:
2576 aprefix = bprefix = ''
2655 aprefix = bprefix = ''
2577 else:
2656 else:
2578 aprefix = 'a/'
2657 aprefix = 'a/'
2579 bprefix = 'b/'
2658 bprefix = 'b/'
2580
2659
2581 def diffline(f, revs):
2660 def diffline(f, revs):
2582 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2661 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2583 return 'diff %s %s' % (revinfo, f)
2662 return 'diff %s %s' % (revinfo, f)
2584
2663
2585 def isempty(fctx):
2664 def isempty(fctx):
2586 return fctx is None or fctx.size() == 0
2665 return fctx is None or fctx.size() == 0
2587
2666
2588 date1 = util.datestr(ctx1.date())
2667 date1 = util.datestr(ctx1.date())
2589 date2 = util.datestr(ctx2.date())
2668 date2 = util.datestr(ctx2.date())
2590
2669
2591 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2670 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2592
2671
2593 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2672 if relroot != '' and (repo.ui.configbool('devel', 'all-warnings')
2594 or repo.ui.configbool('devel', 'check-relroot')):
2673 or repo.ui.configbool('devel', 'check-relroot')):
2595 for f in modified + added + removed + list(copy) + list(copy.values()):
2674 for f in modified + added + removed + list(copy) + list(copy.values()):
2596 if f is not None and not f.startswith(relroot):
2675 if f is not None and not f.startswith(relroot):
2597 raise AssertionError(
2676 raise AssertionError(
2598 "file %s doesn't start with relroot %s" % (f, relroot))
2677 "file %s doesn't start with relroot %s" % (f, relroot))
2599
2678
2600 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2679 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2601 content1 = None
2680 content1 = None
2602 content2 = None
2681 content2 = None
2603 fctx1 = None
2682 fctx1 = None
2604 fctx2 = None
2683 fctx2 = None
2605 flag1 = None
2684 flag1 = None
2606 flag2 = None
2685 flag2 = None
2607 if f1:
2686 if f1:
2608 fctx1 = getfilectx(f1, ctx1)
2687 fctx1 = getfilectx(f1, ctx1)
2609 if opts.git or losedatafn:
2688 if opts.git or losedatafn:
2610 flag1 = ctx1.flags(f1)
2689 flag1 = ctx1.flags(f1)
2611 if f2:
2690 if f2:
2612 fctx2 = getfilectx(f2, ctx2)
2691 fctx2 = getfilectx(f2, ctx2)
2613 if opts.git or losedatafn:
2692 if opts.git or losedatafn:
2614 flag2 = ctx2.flags(f2)
2693 flag2 = ctx2.flags(f2)
2615 # if binary is True, output "summary" or "base85", but not "text diff"
2694 # if binary is True, output "summary" or "base85", but not "text diff"
2616 binary = not opts.text and any(f.isbinary()
2695 binary = not opts.text and any(f.isbinary()
2617 for f in [fctx1, fctx2] if f is not None)
2696 for f in [fctx1, fctx2] if f is not None)
2618
2697
2619 if losedatafn and not opts.git:
2698 if losedatafn and not opts.git:
2620 if (binary or
2699 if (binary or
2621 # copy/rename
2700 # copy/rename
2622 f2 in copy or
2701 f2 in copy or
2623 # empty file creation
2702 # empty file creation
2624 (not f1 and isempty(fctx2)) or
2703 (not f1 and isempty(fctx2)) or
2625 # empty file deletion
2704 # empty file deletion
2626 (isempty(fctx1) and not f2) or
2705 (isempty(fctx1) and not f2) or
2627 # create with flags
2706 # create with flags
2628 (not f1 and flag2) or
2707 (not f1 and flag2) or
2629 # change flags
2708 # change flags
2630 (f1 and f2 and flag1 != flag2)):
2709 (f1 and f2 and flag1 != flag2)):
2631 losedatafn(f2 or f1)
2710 losedatafn(f2 or f1)
2632
2711
2633 path1 = f1 or f2
2712 path1 = f1 or f2
2634 path2 = f2 or f1
2713 path2 = f2 or f1
2635 path1 = posixpath.join(prefix, path1[len(relroot):])
2714 path1 = posixpath.join(prefix, path1[len(relroot):])
2636 path2 = posixpath.join(prefix, path2[len(relroot):])
2715 path2 = posixpath.join(prefix, path2[len(relroot):])
2637 header = []
2716 header = []
2638 if opts.git:
2717 if opts.git:
2639 header.append('diff --git %s%s %s%s' %
2718 header.append('diff --git %s%s %s%s' %
2640 (aprefix, path1, bprefix, path2))
2719 (aprefix, path1, bprefix, path2))
2641 if not f1: # added
2720 if not f1: # added
2642 header.append('new file mode %s' % gitmode[flag2])
2721 header.append('new file mode %s' % gitmode[flag2])
2643 elif not f2: # removed
2722 elif not f2: # removed
2644 header.append('deleted file mode %s' % gitmode[flag1])
2723 header.append('deleted file mode %s' % gitmode[flag1])
2645 else: # modified/copied/renamed
2724 else: # modified/copied/renamed
2646 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2725 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2647 if mode1 != mode2:
2726 if mode1 != mode2:
2648 header.append('old mode %s' % mode1)
2727 header.append('old mode %s' % mode1)
2649 header.append('new mode %s' % mode2)
2728 header.append('new mode %s' % mode2)
2650 if copyop is not None:
2729 if copyop is not None:
2651 if opts.showsimilarity:
2730 if opts.showsimilarity:
2652 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2731 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2653 header.append('similarity index %d%%' % sim)
2732 header.append('similarity index %d%%' % sim)
2654 header.append('%s from %s' % (copyop, path1))
2733 header.append('%s from %s' % (copyop, path1))
2655 header.append('%s to %s' % (copyop, path2))
2734 header.append('%s to %s' % (copyop, path2))
2656 elif revs and not repo.ui.quiet:
2735 elif revs and not repo.ui.quiet:
2657 header.append(diffline(path1, revs))
2736 header.append(diffline(path1, revs))
2658
2737
2659 # fctx.is | diffopts | what to | is fctx.data()
2738 # fctx.is | diffopts | what to | is fctx.data()
2660 # binary() | text nobinary git index | output? | outputted?
2739 # binary() | text nobinary git index | output? | outputted?
2661 # ------------------------------------|----------------------------
2740 # ------------------------------------|----------------------------
2662 # yes | no no no * | summary | no
2741 # yes | no no no * | summary | no
2663 # yes | no no yes * | base85 | yes
2742 # yes | no no yes * | base85 | yes
2664 # yes | no yes no * | summary | no
2743 # yes | no yes no * | summary | no
2665 # yes | no yes yes 0 | summary | no
2744 # yes | no yes yes 0 | summary | no
2666 # yes | no yes yes >0 | summary | semi [1]
2745 # yes | no yes yes >0 | summary | semi [1]
2667 # yes | yes * * * | text diff | yes
2746 # yes | yes * * * | text diff | yes
2668 # no | * * * * | text diff | yes
2747 # no | * * * * | text diff | yes
2669 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2748 # [1]: hash(fctx.data()) is outputted. so fctx.data() cannot be faked
2670 if binary and (not opts.git or (opts.git and opts.nobinary and not
2749 if binary and (not opts.git or (opts.git and opts.nobinary and not
2671 opts.index)):
2750 opts.index)):
2672 # fast path: no binary content will be displayed, content1 and
2751 # fast path: no binary content will be displayed, content1 and
2673 # content2 are only used for equivalent test. cmp() could have a
2752 # content2 are only used for equivalent test. cmp() could have a
2674 # fast path.
2753 # fast path.
2675 if fctx1 is not None:
2754 if fctx1 is not None:
2676 content1 = b'\0'
2755 content1 = b'\0'
2677 if fctx2 is not None:
2756 if fctx2 is not None:
2678 if fctx1 is not None and not fctx1.cmp(fctx2):
2757 if fctx1 is not None and not fctx1.cmp(fctx2):
2679 content2 = b'\0' # not different
2758 content2 = b'\0' # not different
2680 else:
2759 else:
2681 content2 = b'\0\0'
2760 content2 = b'\0\0'
2682 else:
2761 else:
2683 # normal path: load contents
2762 # normal path: load contents
2684 if fctx1 is not None:
2763 if fctx1 is not None:
2685 content1 = fctx1.data()
2764 content1 = fctx1.data()
2686 if fctx2 is not None:
2765 if fctx2 is not None:
2687 content2 = fctx2.data()
2766 content2 = fctx2.data()
2688
2767
2689 if binary and opts.git and not opts.nobinary:
2768 if binary and opts.git and not opts.nobinary:
2690 text = mdiff.b85diff(content1, content2)
2769 text = mdiff.b85diff(content1, content2)
2691 if text:
2770 if text:
2692 header.append('index %s..%s' %
2771 header.append('index %s..%s' %
2693 (gitindex(content1), gitindex(content2)))
2772 (gitindex(content1), gitindex(content2)))
2694 hunks = (None, [text]),
2773 hunks = (None, [text]),
2695 else:
2774 else:
2696 if opts.git and opts.index > 0:
2775 if opts.git and opts.index > 0:
2697 flag = flag1
2776 flag = flag1
2698 if flag is None:
2777 if flag is None:
2699 flag = flag2
2778 flag = flag2
2700 header.append('index %s..%s %s' %
2779 header.append('index %s..%s %s' %
2701 (gitindex(content1)[0:opts.index],
2780 (gitindex(content1)[0:opts.index],
2702 gitindex(content2)[0:opts.index],
2781 gitindex(content2)[0:opts.index],
2703 gitmode[flag]))
2782 gitmode[flag]))
2704
2783
2705 uheaders, hunks = mdiff.unidiff(content1, date1,
2784 uheaders, hunks = mdiff.unidiff(content1, date1,
2706 content2, date2,
2785 content2, date2,
2707 path1, path2, opts=opts)
2786 path1, path2, opts=opts)
2708 header.extend(uheaders)
2787 header.extend(uheaders)
2709 yield fctx1, fctx2, header, hunks
2788 yield fctx1, fctx2, header, hunks
2710
2789
2711 def diffstatsum(stats):
2790 def diffstatsum(stats):
2712 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2791 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2713 for f, a, r, b in stats:
2792 for f, a, r, b in stats:
2714 maxfile = max(maxfile, encoding.colwidth(f))
2793 maxfile = max(maxfile, encoding.colwidth(f))
2715 maxtotal = max(maxtotal, a + r)
2794 maxtotal = max(maxtotal, a + r)
2716 addtotal += a
2795 addtotal += a
2717 removetotal += r
2796 removetotal += r
2718 binary = binary or b
2797 binary = binary or b
2719
2798
2720 return maxfile, maxtotal, addtotal, removetotal, binary
2799 return maxfile, maxtotal, addtotal, removetotal, binary
2721
2800
2722 def diffstatdata(lines):
2801 def diffstatdata(lines):
2723 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2802 diffre = re.compile('^diff .*-r [a-z0-9]+\s(.*)$')
2724
2803
2725 results = []
2804 results = []
2726 filename, adds, removes, isbinary = None, 0, 0, False
2805 filename, adds, removes, isbinary = None, 0, 0, False
2727
2806
2728 def addresult():
2807 def addresult():
2729 if filename:
2808 if filename:
2730 results.append((filename, adds, removes, isbinary))
2809 results.append((filename, adds, removes, isbinary))
2731
2810
2732 # inheader is used to track if a line is in the
2811 # inheader is used to track if a line is in the
2733 # header portion of the diff. This helps properly account
2812 # header portion of the diff. This helps properly account
2734 # for lines that start with '--' or '++'
2813 # for lines that start with '--' or '++'
2735 inheader = False
2814 inheader = False
2736
2815
2737 for line in lines:
2816 for line in lines:
2738 if line.startswith('diff'):
2817 if line.startswith('diff'):
2739 addresult()
2818 addresult()
2740 # starting a new file diff
2819 # starting a new file diff
2741 # set numbers to 0 and reset inheader
2820 # set numbers to 0 and reset inheader
2742 inheader = True
2821 inheader = True
2743 adds, removes, isbinary = 0, 0, False
2822 adds, removes, isbinary = 0, 0, False
2744 if line.startswith('diff --git a/'):
2823 if line.startswith('diff --git a/'):
2745 filename = gitre.search(line).group(2)
2824 filename = gitre.search(line).group(2)
2746 elif line.startswith('diff -r'):
2825 elif line.startswith('diff -r'):
2747 # format: "diff -r ... -r ... filename"
2826 # format: "diff -r ... -r ... filename"
2748 filename = diffre.search(line).group(1)
2827 filename = diffre.search(line).group(1)
2749 elif line.startswith('@@'):
2828 elif line.startswith('@@'):
2750 inheader = False
2829 inheader = False
2751 elif line.startswith('+') and not inheader:
2830 elif line.startswith('+') and not inheader:
2752 adds += 1
2831 adds += 1
2753 elif line.startswith('-') and not inheader:
2832 elif line.startswith('-') and not inheader:
2754 removes += 1
2833 removes += 1
2755 elif (line.startswith('GIT binary patch') or
2834 elif (line.startswith('GIT binary patch') or
2756 line.startswith('Binary file')):
2835 line.startswith('Binary file')):
2757 isbinary = True
2836 isbinary = True
2758 addresult()
2837 addresult()
2759 return results
2838 return results
2760
2839
2761 def diffstat(lines, width=80):
2840 def diffstat(lines, width=80):
2762 output = []
2841 output = []
2763 stats = diffstatdata(lines)
2842 stats = diffstatdata(lines)
2764 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2843 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2765
2844
2766 countwidth = len(str(maxtotal))
2845 countwidth = len(str(maxtotal))
2767 if hasbinary and countwidth < 3:
2846 if hasbinary and countwidth < 3:
2768 countwidth = 3
2847 countwidth = 3
2769 graphwidth = width - countwidth - maxname - 6
2848 graphwidth = width - countwidth - maxname - 6
2770 if graphwidth < 10:
2849 if graphwidth < 10:
2771 graphwidth = 10
2850 graphwidth = 10
2772
2851
2773 def scale(i):
2852 def scale(i):
2774 if maxtotal <= graphwidth:
2853 if maxtotal <= graphwidth:
2775 return i
2854 return i
2776 # If diffstat runs out of room it doesn't print anything,
2855 # If diffstat runs out of room it doesn't print anything,
2777 # which isn't very useful, so always print at least one + or -
2856 # which isn't very useful, so always print at least one + or -
2778 # if there were at least some changes.
2857 # if there were at least some changes.
2779 return max(i * graphwidth // maxtotal, int(bool(i)))
2858 return max(i * graphwidth // maxtotal, int(bool(i)))
2780
2859
2781 for filename, adds, removes, isbinary in stats:
2860 for filename, adds, removes, isbinary in stats:
2782 if isbinary:
2861 if isbinary:
2783 count = 'Bin'
2862 count = 'Bin'
2784 else:
2863 else:
2785 count = '%d' % (adds + removes)
2864 count = '%d' % (adds + removes)
2786 pluses = '+' * scale(adds)
2865 pluses = '+' * scale(adds)
2787 minuses = '-' * scale(removes)
2866 minuses = '-' * scale(removes)
2788 output.append(' %s%s | %*s %s%s\n' %
2867 output.append(' %s%s | %*s %s%s\n' %
2789 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2868 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2790 countwidth, count, pluses, minuses))
2869 countwidth, count, pluses, minuses))
2791
2870
2792 if stats:
2871 if stats:
2793 output.append(_(' %d files changed, %d insertions(+), '
2872 output.append(_(' %d files changed, %d insertions(+), '
2794 '%d deletions(-)\n')
2873 '%d deletions(-)\n')
2795 % (len(stats), totaladds, totalremoves))
2874 % (len(stats), totaladds, totalremoves))
2796
2875
2797 return ''.join(output)
2876 return ''.join(output)
2798
2877
2799 def diffstatui(*args, **kw):
2878 def diffstatui(*args, **kw):
2800 '''like diffstat(), but yields 2-tuples of (output, label) for
2879 '''like diffstat(), but yields 2-tuples of (output, label) for
2801 ui.write()
2880 ui.write()
2802 '''
2881 '''
2803
2882
2804 for line in diffstat(*args, **kw).splitlines():
2883 for line in diffstat(*args, **kw).splitlines():
2805 if line and line[-1] in '+-':
2884 if line and line[-1] in '+-':
2806 name, graph = line.rsplit(' ', 1)
2885 name, graph = line.rsplit(' ', 1)
2807 yield (name + ' ', '')
2886 yield (name + ' ', '')
2808 m = re.search(br'\++', graph)
2887 m = re.search(br'\++', graph)
2809 if m:
2888 if m:
2810 yield (m.group(0), 'diffstat.inserted')
2889 yield (m.group(0), 'diffstat.inserted')
2811 m = re.search(br'-+', graph)
2890 m = re.search(br'-+', graph)
2812 if m:
2891 if m:
2813 yield (m.group(0), 'diffstat.deleted')
2892 yield (m.group(0), 'diffstat.deleted')
2814 else:
2893 else:
2815 yield (line, '')
2894 yield (line, '')
2816 yield ('\n', '')
2895 yield ('\n', '')
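
The two helpers added above carry the whole feature: _findmatches pairs each deleted line with the first later inserted line whose difflib.SequenceMatcher ratio exceeds 0.7, and _inlinediff re-splits the paired lines on non-word characters and walks difflib.ndiff so that only the tokens that actually differ get the '.highlight' label. The standalone sketch below illustrates that heuristic using only the stdlib; the 0.7 threshold and the tokenization mirror the patch, but the function names are illustrative and this is not the Mercurial implementation itself.

import difflib
import re

def pairlines(deleted, inserted, threshold=0.7):
    # Pair each deleted line with the first sufficiently similar inserted
    # line, mirroring the SequenceMatcher ratio test in _findmatches.
    pairs = []
    used = set()
    for dline in deleted:
        for i, iline in enumerate(inserted):
            if i in used:
                continue
            if difflib.SequenceMatcher(None, dline, iline).ratio() > threshold:
                pairs.append((dline, iline))
                used.add(i)
                break
    return pairs

def inserttokens(old, new):
    # Tokenize on non-word characters and label the tokens of `new`,
    # the same ndiff walk _inlinediff performs for 'diff.inserted' lines.
    labeled = []
    for part in difflib.ndiff(re.split(r'(\W)', old), re.split(r'(\W)', new)):
        if part[0] in '-?':   # tokens only in the old line, or ndiff hints
            continue
        label = 'diff.inserted.highlight' if part[0] == '+' else 'diff.inserted'
        labeled.append((label, part[2:]))
    return labeled

print(pairlines(['be changed into three!'],
                ['(entirely magically,', 'be changed into four!']))
print(inserttokens('be changed into three!', 'be changed into four!'))

Run on the sample lines from the test below, the pairing picks 'be changed into four!' (ratio well above 0.7) rather than the dissimilar inserted line before it, and only the token 'four' ends up with the highlight label, matching the expected test output.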
@@ -1,261 +1,353 b''
1 Setup
1 Setup
2
2
3 $ cat <<EOF >> $HGRCPATH
3 $ cat <<EOF >> $HGRCPATH
4 > [ui]
4 > [ui]
5 > color = yes
5 > color = yes
6 > formatted = always
6 > formatted = always
7 > paginate = never
7 > paginate = never
8 > [color]
8 > [color]
9 > mode = ansi
9 > mode = ansi
10 > EOF
10 > EOF
11 $ hg init repo
11 $ hg init repo
12 $ cd repo
12 $ cd repo
13 $ cat > a <<EOF
13 $ cat > a <<EOF
14 > c
14 > c
15 > c
15 > c
16 > a
16 > a
17 > a
17 > a
18 > b
18 > b
19 > a
19 > a
20 > a
20 > a
21 > c
21 > c
22 > c
22 > c
23 > EOF
23 > EOF
24 $ hg ci -Am adda
24 $ hg ci -Am adda
25 adding a
25 adding a
26 $ cat > a <<EOF
26 $ cat > a <<EOF
27 > c
27 > c
28 > c
28 > c
29 > a
29 > a
30 > a
30 > a
31 > dd
31 > dd
32 > a
32 > a
33 > a
33 > a
34 > c
34 > c
35 > c
35 > c
36 > EOF
36 > EOF
37
37
38 default context
38 default context
39
39
40 $ hg diff --nodates
40 $ hg diff --nodates
41 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
41 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
42 \x1b[0;31;1m--- a/a\x1b[0m (esc)
42 \x1b[0;31;1m--- a/a\x1b[0m (esc)
43 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
43 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
44 \x1b[0;35m@@ -2,7 +2,7 @@\x1b[0m (esc)
44 \x1b[0;35m@@ -2,7 +2,7 @@\x1b[0m (esc)
45 c
45 c
46 a
46 a
47 a
47 a
48 \x1b[0;31m-b\x1b[0m (esc)
48 \x1b[0;31m-b\x1b[0m (esc)
49 \x1b[0;32m+dd\x1b[0m (esc)
49 \x1b[0;32m+dd\x1b[0m (esc)
50 a
50 a
51 a
51 a
52 c
52 c
53
53
54 (check that 'ui.color=yes' match '--color=auto')
54 (check that 'ui.color=yes' match '--color=auto')
55
55
56 $ hg diff --nodates --config ui.formatted=no
56 $ hg diff --nodates --config ui.formatted=no
57 diff -r cf9f4ba66af2 a
57 diff -r cf9f4ba66af2 a
58 --- a/a
58 --- a/a
59 +++ b/a
59 +++ b/a
60 @@ -2,7 +2,7 @@
60 @@ -2,7 +2,7 @@
61 c
61 c
62 a
62 a
63 a
63 a
64 -b
64 -b
65 +dd
65 +dd
66 a
66 a
67 a
67 a
68 c
68 c
69
69
70 (check that 'ui.color=no' disable color)
70 (check that 'ui.color=no' disable color)
71
71
72 $ hg diff --nodates --config ui.formatted=yes --config ui.color=no
72 $ hg diff --nodates --config ui.formatted=yes --config ui.color=no
73 diff -r cf9f4ba66af2 a
73 diff -r cf9f4ba66af2 a
74 --- a/a
74 --- a/a
75 +++ b/a
75 +++ b/a
76 @@ -2,7 +2,7 @@
76 @@ -2,7 +2,7 @@
77 c
77 c
78 a
78 a
79 a
79 a
80 -b
80 -b
81 +dd
81 +dd
82 a
82 a
83 a
83 a
84 c
84 c
85
85
86 (check that 'ui.color=always' force color)
86 (check that 'ui.color=always' force color)
87
87
88 $ hg diff --nodates --config ui.formatted=no --config ui.color=always
88 $ hg diff --nodates --config ui.formatted=no --config ui.color=always
89 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
89 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
90 \x1b[0;31;1m--- a/a\x1b[0m (esc)
90 \x1b[0;31;1m--- a/a\x1b[0m (esc)
91 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
91 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
92 \x1b[0;35m@@ -2,7 +2,7 @@\x1b[0m (esc)
92 \x1b[0;35m@@ -2,7 +2,7 @@\x1b[0m (esc)
93 c
93 c
94 a
94 a
95 a
95 a
96 \x1b[0;31m-b\x1b[0m (esc)
96 \x1b[0;31m-b\x1b[0m (esc)
97 \x1b[0;32m+dd\x1b[0m (esc)
97 \x1b[0;32m+dd\x1b[0m (esc)
98 a
98 a
99 a
99 a
100 c
100 c
101
101
102 --unified=2
102 --unified=2
103
103
104 $ hg diff --nodates -U 2
104 $ hg diff --nodates -U 2
105 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
105 \x1b[0;1mdiff -r cf9f4ba66af2 a\x1b[0m (esc)
106 \x1b[0;31;1m--- a/a\x1b[0m (esc)
106 \x1b[0;31;1m--- a/a\x1b[0m (esc)
107 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
107 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
108 \x1b[0;35m@@ -3,5 +3,5 @@\x1b[0m (esc)
108 \x1b[0;35m@@ -3,5 +3,5 @@\x1b[0m (esc)
109 a
109 a
110 a
110 a
111 \x1b[0;31m-b\x1b[0m (esc)
111 \x1b[0;31m-b\x1b[0m (esc)
112 \x1b[0;32m+dd\x1b[0m (esc)
112 \x1b[0;32m+dd\x1b[0m (esc)
113 a
113 a
114 a
114 a
115
115
116 diffstat
116 diffstat
117
117
118 $ hg diff --stat
118 $ hg diff --stat
119 a | 2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc)
119 a | 2 \x1b[0;32m+\x1b[0m\x1b[0;31m-\x1b[0m (esc)
120 1 files changed, 1 insertions(+), 1 deletions(-)
120 1 files changed, 1 insertions(+), 1 deletions(-)
121 $ cat <<EOF >> $HGRCPATH
121 $ cat <<EOF >> $HGRCPATH
122 > [extensions]
122 > [extensions]
123 > record =
123 > record =
124 > [ui]
124 > [ui]
125 > interactive = true
125 > interactive = true
126 > [diff]
126 > [diff]
127 > git = True
127 > git = True
128 > EOF
128 > EOF
129
129
130 #if execbit
130 #if execbit
131
131
132 record
132 record
133
133
134 $ chmod +x a
134 $ chmod +x a
135 $ hg record -m moda a <<EOF
135 $ hg record -m moda a <<EOF
136 > y
136 > y
137 > y
137 > y
138 > EOF
138 > EOF
139 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
139 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
140 \x1b[0;36;1mold mode 100644\x1b[0m (esc)
140 \x1b[0;36;1mold mode 100644\x1b[0m (esc)
141 \x1b[0;36;1mnew mode 100755\x1b[0m (esc)
141 \x1b[0;36;1mnew mode 100755\x1b[0m (esc)
142 1 hunks, 1 lines changed
142 1 hunks, 1 lines changed
143 \x1b[0;33mexamine changes to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
143 \x1b[0;33mexamine changes to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
144
144
145 \x1b[0;35m@@ -2,7 +2,7 @@ c\x1b[0m (esc)
145 \x1b[0;35m@@ -2,7 +2,7 @@ c\x1b[0m (esc)
146 c
146 c
147 a
147 a
148 a
148 a
149 \x1b[0;31m-b\x1b[0m (esc)
149 \x1b[0;31m-b\x1b[0m (esc)
150 \x1b[0;32m+dd\x1b[0m (esc)
150 \x1b[0;32m+dd\x1b[0m (esc)
151 a
151 a
152 a
152 a
153 c
153 c
154 \x1b[0;33mrecord this change to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
154 \x1b[0;33mrecord this change to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
155
155
156
156
157 $ echo "[extensions]" >> $HGRCPATH
157 $ echo "[extensions]" >> $HGRCPATH
158 $ echo "mq=" >> $HGRCPATH
158 $ echo "mq=" >> $HGRCPATH
159 $ hg rollback
159 $ hg rollback
160 repository tip rolled back to revision 0 (undo commit)
160 repository tip rolled back to revision 0 (undo commit)
161 working directory now based on revision 0
161 working directory now based on revision 0
162
162
163 qrecord
163 qrecord
164
164
165 $ hg qrecord -m moda patch <<EOF
165 $ hg qrecord -m moda patch <<EOF
166 > y
166 > y
167 > y
167 > y
168 > EOF
168 > EOF
169 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
169 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
170 \x1b[0;36;1mold mode 100644\x1b[0m (esc)
170 \x1b[0;36;1mold mode 100644\x1b[0m (esc)
171 \x1b[0;36;1mnew mode 100755\x1b[0m (esc)
171 \x1b[0;36;1mnew mode 100755\x1b[0m (esc)
172 1 hunks, 1 lines changed
172 1 hunks, 1 lines changed
173 \x1b[0;33mexamine changes to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
173 \x1b[0;33mexamine changes to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
174
174
175 \x1b[0;35m@@ -2,7 +2,7 @@ c\x1b[0m (esc)
175 \x1b[0;35m@@ -2,7 +2,7 @@ c\x1b[0m (esc)
176 c
176 c
177 a
177 a
178 a
178 a
179 \x1b[0;31m-b\x1b[0m (esc)
179 \x1b[0;31m-b\x1b[0m (esc)
180 \x1b[0;32m+dd\x1b[0m (esc)
180 \x1b[0;32m+dd\x1b[0m (esc)
181 a
181 a
182 a
182 a
183 c
183 c
184 \x1b[0;33mrecord this change to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
184 \x1b[0;33mrecord this change to 'a'? [Ynesfdaq?]\x1b[0m y (esc)
185
185
186
186
187 $ hg qpop -a
187 $ hg qpop -a
188 popping patch
188 popping patch
189 patch queue now empty
189 patch queue now empty
190
190
191 #endif
191 #endif
192
192
193 issue3712: test colorization of subrepo diff
193 issue3712: test colorization of subrepo diff
194
194
195 $ hg init sub
195 $ hg init sub
196 $ echo b > sub/b
196 $ echo b > sub/b
197 $ hg -R sub commit -Am 'create sub'
197 $ hg -R sub commit -Am 'create sub'
198 adding b
198 adding b
199 $ echo 'sub = sub' > .hgsub
199 $ echo 'sub = sub' > .hgsub
200 $ hg add .hgsub
200 $ hg add .hgsub
201 $ hg commit -m 'add subrepo sub'
201 $ hg commit -m 'add subrepo sub'
202 $ echo aa >> a
202 $ echo aa >> a
203 $ echo bb >> sub/b
203 $ echo bb >> sub/b
204
204
205 $ hg diff -S
205 $ hg diff -S
206 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
206 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
207 \x1b[0;31;1m--- a/a\x1b[0m (esc)
207 \x1b[0;31;1m--- a/a\x1b[0m (esc)
208 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
208 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
209 \x1b[0;35m@@ -7,3 +7,4 @@\x1b[0m (esc)
209 \x1b[0;35m@@ -7,3 +7,4 @@\x1b[0m (esc)
210 a
210 a
211 c
211 c
212 c
212 c
213 \x1b[0;32m+aa\x1b[0m (esc)
213 \x1b[0;32m+aa\x1b[0m (esc)
214 \x1b[0;1mdiff --git a/sub/b b/sub/b\x1b[0m (esc)
214 \x1b[0;1mdiff --git a/sub/b b/sub/b\x1b[0m (esc)
215 \x1b[0;31;1m--- a/sub/b\x1b[0m (esc)
215 \x1b[0;31;1m--- a/sub/b\x1b[0m (esc)
216 \x1b[0;32;1m+++ b/sub/b\x1b[0m (esc)
216 \x1b[0;32;1m+++ b/sub/b\x1b[0m (esc)
217 \x1b[0;35m@@ -1,1 +1,2 @@\x1b[0m (esc)
217 \x1b[0;35m@@ -1,1 +1,2 @@\x1b[0m (esc)
218 b
218 b
219 \x1b[0;32m+bb\x1b[0m (esc)
219 \x1b[0;32m+bb\x1b[0m (esc)
220
220
221 test tabs
221 test tabs
222
222
223 $ cat >> a <<EOF
223 $ cat >> a <<EOF
224 > one tab
224 > one tab
225 > two tabs
225 > two tabs
226 > end tab
226 > end tab
227 > mid tab
227 > mid tab
228 > all tabs
228 > all tabs
229 > EOF
229 > EOF
230 $ hg diff --nodates
230 $ hg diff --nodates
231 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
231 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
232 \x1b[0;31;1m--- a/a\x1b[0m (esc)
232 \x1b[0;31;1m--- a/a\x1b[0m (esc)
233 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
233 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
234 \x1b[0;35m@@ -7,3 +7,9 @@\x1b[0m (esc)
234 \x1b[0;35m@@ -7,3 +7,9 @@\x1b[0m (esc)
235 a
235 a
236 c
236 c
237 c
237 c
238 \x1b[0;32m+aa\x1b[0m (esc)
238 \x1b[0;32m+aa\x1b[0m (esc)
239 \x1b[0;32m+\x1b[0m \x1b[0;32mone tab\x1b[0m (esc)
239 \x1b[0;32m+\x1b[0m \x1b[0;32mone tab\x1b[0m (esc)
240 \x1b[0;32m+\x1b[0m \x1b[0;32mtwo tabs\x1b[0m (esc)
240 \x1b[0;32m+\x1b[0m \x1b[0;32mtwo tabs\x1b[0m (esc)
241 \x1b[0;32m+end tab\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
241 \x1b[0;32m+end tab\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
242 \x1b[0;32m+mid\x1b[0m \x1b[0;32mtab\x1b[0m (esc)
242 \x1b[0;32m+mid\x1b[0m \x1b[0;32mtab\x1b[0m (esc)
243 \x1b[0;32m+\x1b[0m \x1b[0;32mall\x1b[0m \x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
243 \x1b[0;32m+\x1b[0m \x1b[0;32mall\x1b[0m \x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
244 $ echo "[color]" >> $HGRCPATH
244 $ echo "[color]" >> $HGRCPATH
245 $ echo "diff.tab = bold magenta" >> $HGRCPATH
245 $ echo "diff.tab = bold magenta" >> $HGRCPATH
246 $ hg diff --nodates
246 $ hg diff --nodates
247 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
247 \x1b[0;1mdiff --git a/a b/a\x1b[0m (esc)
248 \x1b[0;31;1m--- a/a\x1b[0m (esc)
248 \x1b[0;31;1m--- a/a\x1b[0m (esc)
249 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
249 \x1b[0;32;1m+++ b/a\x1b[0m (esc)
250 \x1b[0;35m@@ -7,3 +7,9 @@\x1b[0m (esc)
250 \x1b[0;35m@@ -7,3 +7,9 @@\x1b[0m (esc)
251 a
251 a
252 c
252 c
253 c
253 c
254 \x1b[0;32m+aa\x1b[0m (esc)
254 \x1b[0;32m+aa\x1b[0m (esc)
255 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mone tab\x1b[0m (esc)
255 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mone tab\x1b[0m (esc)
256 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtwo tabs\x1b[0m (esc)
256 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtwo tabs\x1b[0m (esc)
257 \x1b[0;32m+end tab\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
257 \x1b[0;32m+end tab\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
258 \x1b[0;32m+mid\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtab\x1b[0m (esc)
258 \x1b[0;32m+mid\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtab\x1b[0m (esc)
259 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mall\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
259 \x1b[0;32m+\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mall\x1b[0m\x1b[0;1;35m \x1b[0m\x1b[0;32mtabs\x1b[0m\x1b[0;1;41m \x1b[0m (esc)
260
260
261 $ cd ..
261 $ cd ..
262
263 test inline color diff
264
265 $ hg init inline
266 $ cd inline
267 $ cat > file1 << EOF
268 > this is the first line
269 > this is the second line
270 > third line starts with space
271 > + starts with a plus sign
272 >
273 > this line won't change
274 >
275 > two lines are going to
276 > be changed into three!
277 >
278 > three of those lines will
279 > collapse onto one
280 > (to see if it works)
281 > EOF
282 $ hg add file1
283 $ hg ci -m 'commit'
284 $ cat > file1 << EOF
285 > that is the first paragraph
286 > this is the second line
287 > third line starts with space
288 > - starts with a minus sign
289 >
290 > this line won't change
291 >
292 > two lines are going to
293 > (entirely magically,
294 > assuming this works)
295 > be changed into four!
296 >
297 > three of those lines have
298 > collapsed onto one
299 > EOF
300 $ hg diff --config experimental.worddiff=False --color=debug
301 [diff.diffline|diff --git a/file1 b/file1]
302 [diff.file_a|--- a/file1]
303 [diff.file_b|+++ b/file1]
304 [diff.hunk|@@ -1,13 +1,14 @@]
305 [diff.deleted|-this is the first line]
306 [diff.deleted|-this is the second line]
307 [diff.deleted|- third line starts with space]
308 [diff.deleted|-+ starts with a plus sign]
309 [diff.inserted|+that is the first paragraph]
310 [diff.inserted|+ this is the second line]
311 [diff.inserted|+third line starts with space]
312 [diff.inserted|+- starts with a minus sign]
313
314 this line won't change
315
316 two lines are going to
317 [diff.deleted|-be changed into three!]
318 [diff.inserted|+(entirely magically,]
319 [diff.inserted|+ assuming this works)]
320 [diff.inserted|+be changed into four!]
321
322 [diff.deleted|-three of those lines will]
323 [diff.deleted|-collapse onto one]
324 [diff.deleted|-(to see if it works)]
325 [diff.inserted|+three of those lines have]
326 [diff.inserted|+collapsed onto one]
327 $ hg diff --config experimental.worddiff=True --color=debug
328 [diff.diffline|diff --git a/file1 b/file1]
329 [diff.file_a|--- a/file1]
330 [diff.file_b|+++ b/file1]
331 [diff.hunk|@@ -1,13 +1,14 @@]
332 [diff.deleted|-this is the ][diff.deleted.highlight|first][diff.deleted| line]
333 [diff.deleted|-this is the second line]
334 [diff.deleted|-][diff.deleted.highlight| ][diff.deleted|third line starts with space]
335 [diff.deleted|-][diff.deleted.highlight|+][diff.deleted| starts with a ][diff.deleted.highlight|plus][diff.deleted| sign]
336 [diff.inserted|+that is the first paragraph]
337 [diff.inserted|+][diff.inserted.highlight| ][diff.inserted|this is the ][diff.inserted.highlight|second][diff.inserted| line]
338 [diff.inserted|+third line starts with space]
339 [diff.inserted|+][diff.inserted.highlight|-][diff.inserted| starts with a ][diff.inserted.highlight|minus][diff.inserted| sign]
340
341 this line won't change
342
343 two lines are going to
344 [diff.deleted|-be changed into ][diff.deleted.highlight|three][diff.deleted|!]
345 [diff.inserted|+(entirely magically,]
346 [diff.inserted|+ assuming this works)]
347 [diff.inserted|+be changed into ][diff.inserted.highlight|four][diff.inserted|!]
348
349 [diff.deleted|-three of those lines ][diff.deleted.highlight|will]
350 [diff.deleted|-][diff.deleted.highlight|collapse][diff.deleted| onto one]
351 [diff.deleted|-(to see if it works)]
352 [diff.inserted|+three of those lines ][diff.inserted.highlight|have]
353 [diff.inserted|+][diff.inserted.highlight|collapsed][diff.inserted| onto one]