split: handle partial commit of renames when doing split or record (issue5723)...
Kyle Lippincott - r43122:3cf09184 default
@@ -1,3427 +1,3439 @@
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import copy as copymod
10 import copy as copymod
11 import errno
11 import errno
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullid,
18 nullid,
19 nullrev,
19 nullrev,
20 short,
20 short,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 bookmarks,
24 bookmarks,
25 changelog,
25 changelog,
26 copies,
26 copies,
27 crecord as crecordmod,
27 crecord as crecordmod,
28 dirstateguard,
28 dirstateguard,
29 encoding,
29 encoding,
30 error,
30 error,
31 formatter,
31 formatter,
32 logcmdutil,
32 logcmdutil,
33 match as matchmod,
33 match as matchmod,
34 merge as mergemod,
34 merge as mergemod,
35 mergeutil,
35 mergeutil,
36 obsolete,
36 obsolete,
37 patch,
37 patch,
38 pathutil,
38 pathutil,
39 phases,
39 phases,
40 pycompat,
40 pycompat,
41 repair,
41 repair,
42 revlog,
42 revlog,
43 rewriteutil,
43 rewriteutil,
44 scmutil,
44 scmutil,
45 smartset,
45 smartset,
46 state as statemod,
46 state as statemod,
47 subrepoutil,
47 subrepoutil,
48 templatekw,
48 templatekw,
49 templater,
49 templater,
50 util,
50 util,
51 vfs as vfsmod,
51 vfs as vfsmod,
52 )
52 )
53
53
54 from .utils import (
54 from .utils import (
55 dateutil,
55 dateutil,
56 stringutil,
56 stringutil,
57 )
57 )
58
58
59 stringio = util.stringio
59 stringio = util.stringio
60
60
61 # templates of common command options
61 # templates of common command options
62
62
63 dryrunopts = [
63 dryrunopts = [
64 ('n', 'dry-run', None,
64 ('n', 'dry-run', None,
65 _('do not perform actions, just print output')),
65 _('do not perform actions, just print output')),
66 ]
66 ]
67
67
68 confirmopts = [
68 confirmopts = [
69 ('', 'confirm', None,
69 ('', 'confirm', None,
70 _('ask before applying actions')),
70 _('ask before applying actions')),
71 ]
71 ]
72
72
73 remoteopts = [
73 remoteopts = [
74 ('e', 'ssh', '',
74 ('e', 'ssh', '',
75 _('specify ssh command to use'), _('CMD')),
75 _('specify ssh command to use'), _('CMD')),
76 ('', 'remotecmd', '',
76 ('', 'remotecmd', '',
77 _('specify hg command to run on the remote side'), _('CMD')),
77 _('specify hg command to run on the remote side'), _('CMD')),
78 ('', 'insecure', None,
78 ('', 'insecure', None,
79 _('do not verify server certificate (ignoring web.cacerts config)')),
79 _('do not verify server certificate (ignoring web.cacerts config)')),
80 ]
80 ]
81
81
82 walkopts = [
82 walkopts = [
83 ('I', 'include', [],
83 ('I', 'include', [],
84 _('include names matching the given patterns'), _('PATTERN')),
84 _('include names matching the given patterns'), _('PATTERN')),
85 ('X', 'exclude', [],
85 ('X', 'exclude', [],
86 _('exclude names matching the given patterns'), _('PATTERN')),
86 _('exclude names matching the given patterns'), _('PATTERN')),
87 ]
87 ]
88
88
89 commitopts = [
89 commitopts = [
90 ('m', 'message', '',
90 ('m', 'message', '',
91 _('use text as commit message'), _('TEXT')),
91 _('use text as commit message'), _('TEXT')),
92 ('l', 'logfile', '',
92 ('l', 'logfile', '',
93 _('read commit message from file'), _('FILE')),
93 _('read commit message from file'), _('FILE')),
94 ]
94 ]
95
95
96 commitopts2 = [
96 commitopts2 = [
97 ('d', 'date', '',
97 ('d', 'date', '',
98 _('record the specified date as commit date'), _('DATE')),
98 _('record the specified date as commit date'), _('DATE')),
99 ('u', 'user', '',
99 ('u', 'user', '',
100 _('record the specified user as committer'), _('USER')),
100 _('record the specified user as committer'), _('USER')),
101 ]
101 ]
102
102
103 formatteropts = [
103 formatteropts = [
104 ('T', 'template', '',
104 ('T', 'template', '',
105 _('display with template'), _('TEMPLATE')),
105 _('display with template'), _('TEMPLATE')),
106 ]
106 ]
107
107
108 templateopts = [
108 templateopts = [
109 ('', 'style', '',
109 ('', 'style', '',
110 _('display using template map file (DEPRECATED)'), _('STYLE')),
110 _('display using template map file (DEPRECATED)'), _('STYLE')),
111 ('T', 'template', '',
111 ('T', 'template', '',
112 _('display with template'), _('TEMPLATE')),
112 _('display with template'), _('TEMPLATE')),
113 ]
113 ]
114
114
115 logopts = [
115 logopts = [
116 ('p', 'patch', None, _('show patch')),
116 ('p', 'patch', None, _('show patch')),
117 ('g', 'git', None, _('use git extended diff format')),
117 ('g', 'git', None, _('use git extended diff format')),
118 ('l', 'limit', '',
118 ('l', 'limit', '',
119 _('limit number of changes displayed'), _('NUM')),
119 _('limit number of changes displayed'), _('NUM')),
120 ('M', 'no-merges', None, _('do not show merges')),
120 ('M', 'no-merges', None, _('do not show merges')),
121 ('', 'stat', None, _('output diffstat-style summary of changes')),
121 ('', 'stat', None, _('output diffstat-style summary of changes')),
122 ('G', 'graph', None, _("show the revision DAG")),
122 ('G', 'graph', None, _("show the revision DAG")),
123 ] + templateopts
123 ] + templateopts
124
124
125 diffopts = [
125 diffopts = [
126 ('a', 'text', None, _('treat all files as text')),
126 ('a', 'text', None, _('treat all files as text')),
127 ('g', 'git', None, _('use git extended diff format')),
127 ('g', 'git', None, _('use git extended diff format')),
128 ('', 'binary', None, _('generate binary diffs in git mode (default)')),
128 ('', 'binary', None, _('generate binary diffs in git mode (default)')),
129 ('', 'nodates', None, _('omit dates from diff headers'))
129 ('', 'nodates', None, _('omit dates from diff headers'))
130 ]
130 ]
131
131
132 diffwsopts = [
132 diffwsopts = [
133 ('w', 'ignore-all-space', None,
133 ('w', 'ignore-all-space', None,
134 _('ignore white space when comparing lines')),
134 _('ignore white space when comparing lines')),
135 ('b', 'ignore-space-change', None,
135 ('b', 'ignore-space-change', None,
136 _('ignore changes in the amount of white space')),
136 _('ignore changes in the amount of white space')),
137 ('B', 'ignore-blank-lines', None,
137 ('B', 'ignore-blank-lines', None,
138 _('ignore changes whose lines are all blank')),
138 _('ignore changes whose lines are all blank')),
139 ('Z', 'ignore-space-at-eol', None,
139 ('Z', 'ignore-space-at-eol', None,
140 _('ignore changes in whitespace at EOL')),
140 _('ignore changes in whitespace at EOL')),
141 ]
141 ]
142
142
143 diffopts2 = [
143 diffopts2 = [
144 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
144 ('', 'noprefix', None, _('omit a/ and b/ prefixes from filenames')),
145 ('p', 'show-function', None, _('show which function each change is in')),
145 ('p', 'show-function', None, _('show which function each change is in')),
146 ('', 'reverse', None, _('produce a diff that undoes the changes')),
146 ('', 'reverse', None, _('produce a diff that undoes the changes')),
147 ] + diffwsopts + [
147 ] + diffwsopts + [
148 ('U', 'unified', '',
148 ('U', 'unified', '',
149 _('number of lines of context to show'), _('NUM')),
149 _('number of lines of context to show'), _('NUM')),
150 ('', 'stat', None, _('output diffstat-style summary of changes')),
150 ('', 'stat', None, _('output diffstat-style summary of changes')),
151 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
151 ('', 'root', '', _('produce diffs relative to subdirectory'), _('DIR')),
152 ]
152 ]
153
153
154 mergetoolopts = [
154 mergetoolopts = [
155 ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
155 ('t', 'tool', '', _('specify merge tool'), _('TOOL')),
156 ]
156 ]
157
157
158 similarityopts = [
158 similarityopts = [
159 ('s', 'similarity', '',
159 ('s', 'similarity', '',
160 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
160 _('guess renamed files by similarity (0<=s<=100)'), _('SIMILARITY'))
161 ]
161 ]
162
162
163 subrepoopts = [
163 subrepoopts = [
164 ('S', 'subrepos', None,
164 ('S', 'subrepos', None,
165 _('recurse into subrepositories'))
165 _('recurse into subrepositories'))
166 ]
166 ]
167
167
168 debugrevlogopts = [
168 debugrevlogopts = [
169 ('c', 'changelog', False, _('open changelog')),
169 ('c', 'changelog', False, _('open changelog')),
170 ('m', 'manifest', False, _('open manifest')),
170 ('m', 'manifest', False, _('open manifest')),
171 ('', 'dir', '', _('open directory manifest')),
171 ('', 'dir', '', _('open directory manifest')),
172 ]
172 ]
173
173
174 # special string such that everything below this line will be ignored in the
174 # special string such that everything below this line will be ignored in the
175 # editor text
175 # editor text
176 _linebelow = "^HG: ------------------------ >8 ------------------------$"
176 _linebelow = "^HG: ------------------------ >8 ------------------------$"
177
177
178 def ishunk(x):
178 def ishunk(x):
179 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
179 hunkclasses = (crecordmod.uihunk, patch.recordhunk)
180 return isinstance(x, hunkclasses)
180 return isinstance(x, hunkclasses)
181
181
182 def newandmodified(chunks, originalchunks):
182 def newandmodified(chunks, originalchunks):
183 newlyaddedandmodifiedfiles = set()
183 newlyaddedandmodifiedfiles = set()
184 alsorestore = set()
184 for chunk in chunks:
185 for chunk in chunks:
185 if (ishunk(chunk) and chunk.header.isnewfile() and chunk not in
186 if (ishunk(chunk) and chunk.header.isnewfile() and chunk not in
186 originalchunks):
187 originalchunks):
187 newlyaddedandmodifiedfiles.add(chunk.header.filename())
188 newlyaddedandmodifiedfiles.add(chunk.header.filename())
188 return newlyaddedandmodifiedfiles
189 alsorestore.update(set(chunk.header.files()) -
190 set([chunk.header.filename()]))
191 return newlyaddedandmodifiedfiles, alsorestore
189
192
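A minimal standalone sketch, not part of this changeset, of the contract the updated newandmodified() now provides. The FakeHeader class below is a hypothetical stand-in for a record hunk header: for a rename recorded as a new-file hunk, the primary filename lands in the first set, while the other files named by the header (the rename source) land in alsorestore so they can be reverted before the filtered patch is applied (issue5723).

# Hypothetical stand-in for a record hunk header; only the three methods
# consulted by newandmodified() are modelled.
class FakeHeader(object):
    def __init__(self, files, filename, newfile=True):
        self._files, self._filename, self._newfile = files, filename, newfile
    def files(self):
        return self._files          # every path mentioned by the hunk
    def filename(self):
        return self._filename       # the path the hunk applies to
    def isnewfile(self):
        return self._newfile

# A rename of 'a' to 'b' appears as a new-file hunk for 'b' whose header
# also lists the copy source 'a'.
header = FakeHeader(files=['a', 'b'], filename='b')

newlyadded = {header.filename()}                  # {'b'}
alsorestore = set(header.files()) - newlyadded    # {'a'}
# Without reverting 'a' alongside the backed-up files, applying the filtered
# patch would fail because its copy source is missing from the working copy.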
190 def parsealiases(cmd):
193 def parsealiases(cmd):
191 return cmd.split("|")
194 return cmd.split("|")
192
195
193 def setupwrapcolorwrite(ui):
196 def setupwrapcolorwrite(ui):
194 # wrap ui.write so diff output can be labeled/colorized
197 # wrap ui.write so diff output can be labeled/colorized
195 def wrapwrite(orig, *args, **kw):
198 def wrapwrite(orig, *args, **kw):
196 label = kw.pop(r'label', '')
199 label = kw.pop(r'label', '')
197 for chunk, l in patch.difflabel(lambda: args):
200 for chunk, l in patch.difflabel(lambda: args):
198 orig(chunk, label=label + l)
201 orig(chunk, label=label + l)
199
202
200 oldwrite = ui.write
203 oldwrite = ui.write
201 def wrap(*args, **kwargs):
204 def wrap(*args, **kwargs):
202 return wrapwrite(oldwrite, *args, **kwargs)
205 return wrapwrite(oldwrite, *args, **kwargs)
203 setattr(ui, 'write', wrap)
206 setattr(ui, 'write', wrap)
204 return oldwrite
207 return oldwrite
205
208
206 def filterchunks(ui, originalhunks, usecurses, testfile, match,
209 def filterchunks(ui, originalhunks, usecurses, testfile, match,
207 operation=None):
210 operation=None):
208 try:
211 try:
209 if usecurses:
212 if usecurses:
210 if testfile:
213 if testfile:
211 recordfn = crecordmod.testdecorator(
214 recordfn = crecordmod.testdecorator(
212 testfile, crecordmod.testchunkselector)
215 testfile, crecordmod.testchunkselector)
213 else:
216 else:
214 recordfn = crecordmod.chunkselector
217 recordfn = crecordmod.chunkselector
215
218
216 return crecordmod.filterpatch(ui, originalhunks, recordfn,
219 return crecordmod.filterpatch(ui, originalhunks, recordfn,
217 operation)
220 operation)
218 except crecordmod.fallbackerror as e:
221 except crecordmod.fallbackerror as e:
219 ui.warn('%s\n' % e.message)
222 ui.warn('%s\n' % e.message)
220 ui.warn(_('falling back to text mode\n'))
223 ui.warn(_('falling back to text mode\n'))
221
224
222 return patch.filterpatch(ui, originalhunks, match, operation)
225 return patch.filterpatch(ui, originalhunks, match, operation)
223
226
224 def recordfilter(ui, originalhunks, match, operation=None):
227 def recordfilter(ui, originalhunks, match, operation=None):
225 """ Prompts the user to filter the originalhunks and return a list of
228 """ Prompts the user to filter the originalhunks and return a list of
226 selected hunks.
229 selected hunks.
227 *operation* is used to build ui messages to indicate to the user what
230 *operation* is used to build ui messages to indicate to the user what
228 kind of filtering they are doing: reverting, committing, shelving, etc.
231 kind of filtering they are doing: reverting, committing, shelving, etc.
229 (see patch.filterpatch).
232 (see patch.filterpatch).
230 """
233 """
231 usecurses = crecordmod.checkcurses(ui)
234 usecurses = crecordmod.checkcurses(ui)
232 testfile = ui.config('experimental', 'crecordtest')
235 testfile = ui.config('experimental', 'crecordtest')
233 oldwrite = setupwrapcolorwrite(ui)
236 oldwrite = setupwrapcolorwrite(ui)
234 try:
237 try:
235 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
238 newchunks, newopts = filterchunks(ui, originalhunks, usecurses,
236 testfile, match, operation)
239 testfile, match, operation)
237 finally:
240 finally:
238 ui.write = oldwrite
241 ui.write = oldwrite
239 return newchunks, newopts
242 return newchunks, newopts
240
243
241 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
244 def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
242 filterfn, *pats, **opts):
245 filterfn, *pats, **opts):
243 opts = pycompat.byteskwargs(opts)
246 opts = pycompat.byteskwargs(opts)
244 if not ui.interactive():
247 if not ui.interactive():
245 if cmdsuggest:
248 if cmdsuggest:
246 msg = _('running non-interactively, use %s instead') % cmdsuggest
249 msg = _('running non-interactively, use %s instead') % cmdsuggest
247 else:
250 else:
248 msg = _('running non-interactively')
251 msg = _('running non-interactively')
249 raise error.Abort(msg)
252 raise error.Abort(msg)
250
253
251 # make sure username is set before going interactive
254 # make sure username is set before going interactive
252 if not opts.get('user'):
255 if not opts.get('user'):
253 ui.username() # raise exception, username not provided
256 ui.username() # raise exception, username not provided
254
257
255 def recordfunc(ui, repo, message, match, opts):
258 def recordfunc(ui, repo, message, match, opts):
256 """This is generic record driver.
259 """This is generic record driver.
257
260
258 Its job is to interactively filter local changes, and
261 Its job is to interactively filter local changes, and
259 accordingly prepare working directory into a state in which the
262 accordingly prepare working directory into a state in which the
260 job can be delegated to a non-interactive commit command such as
263 job can be delegated to a non-interactive commit command such as
261 'commit' or 'qrefresh'.
264 'commit' or 'qrefresh'.
262
265
263 After the actual job is done by non-interactive command, the
266 After the actual job is done by non-interactive command, the
264 working directory is restored to its original state.
267 working directory is restored to its original state.
265
268
266 In the end we'll record interesting changes, and everything else
269 In the end we'll record interesting changes, and everything else
267 will be left in place, so the user can continue working.
270 will be left in place, so the user can continue working.
268 """
271 """
269 if not opts.get('interactive-unshelve'):
272 if not opts.get('interactive-unshelve'):
270 checkunfinished(repo, commit=True)
273 checkunfinished(repo, commit=True)
271 wctx = repo[None]
274 wctx = repo[None]
272 merge = len(wctx.parents()) > 1
275 merge = len(wctx.parents()) > 1
273 if merge:
276 if merge:
274 raise error.Abort(_('cannot partially commit a merge '
277 raise error.Abort(_('cannot partially commit a merge '
275 '(use "hg commit" instead)'))
278 '(use "hg commit" instead)'))
276
279
277 def fail(f, msg):
280 def fail(f, msg):
278 raise error.Abort('%s: %s' % (f, msg))
281 raise error.Abort('%s: %s' % (f, msg))
279
282
280 force = opts.get('force')
283 force = opts.get('force')
281 if not force:
284 if not force:
282 vdirs = []
285 vdirs = []
283 match = matchmod.badmatch(match, fail)
286 match = matchmod.badmatch(match, fail)
284 match.explicitdir = vdirs.append
287 match.explicitdir = vdirs.append
285
288
286 status = repo.status(match=match)
289 status = repo.status(match=match)
287
290
288 overrides = {(b'ui', b'commitsubrepos'): True}
291 overrides = {(b'ui', b'commitsubrepos'): True}
289
292
290 with repo.ui.configoverride(overrides, b'record'):
293 with repo.ui.configoverride(overrides, b'record'):
291 # subrepoutil.precommit() modifies the status
294 # subrepoutil.precommit() modifies the status
292 tmpstatus = scmutil.status(copymod.copy(status[0]),
295 tmpstatus = scmutil.status(copymod.copy(status[0]),
293 copymod.copy(status[1]),
296 copymod.copy(status[1]),
294 copymod.copy(status[2]),
297 copymod.copy(status[2]),
295 copymod.copy(status[3]),
298 copymod.copy(status[3]),
296 copymod.copy(status[4]),
299 copymod.copy(status[4]),
297 copymod.copy(status[5]),
300 copymod.copy(status[5]),
298 copymod.copy(status[6]))
301 copymod.copy(status[6]))
299
302
300 # Force allows -X subrepo to skip the subrepo.
303 # Force allows -X subrepo to skip the subrepo.
301 subs, commitsubs, newstate = subrepoutil.precommit(
304 subs, commitsubs, newstate = subrepoutil.precommit(
302 repo.ui, wctx, tmpstatus, match, force=True)
305 repo.ui, wctx, tmpstatus, match, force=True)
303 for s in subs:
306 for s in subs:
304 if s in commitsubs:
307 if s in commitsubs:
305 dirtyreason = wctx.sub(s).dirtyreason(True)
308 dirtyreason = wctx.sub(s).dirtyreason(True)
306 raise error.Abort(dirtyreason)
309 raise error.Abort(dirtyreason)
307
310
308 if not force:
311 if not force:
309 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
312 repo.checkcommitpatterns(wctx, vdirs, match, status, fail)
310 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True,
313 diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True,
311 section='commands',
314 section='commands',
312 configprefix='commit.interactive.')
315 configprefix='commit.interactive.')
313 diffopts.nodates = True
316 diffopts.nodates = True
314 diffopts.git = True
317 diffopts.git = True
315 diffopts.showfunc = True
318 diffopts.showfunc = True
316 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
319 originaldiff = patch.diff(repo, changes=status, opts=diffopts)
317 originalchunks = patch.parsepatch(originaldiff)
320 originalchunks = patch.parsepatch(originaldiff)
318 match = scmutil.match(repo[None], pats)
321 match = scmutil.match(repo[None], pats)
319
322
320 # 1. filter patch, since we are intending to apply a subset of it
323 # 1. filter patch, since we are intending to apply a subset of it
321 try:
324 try:
322 chunks, newopts = filterfn(ui, originalchunks, match)
325 chunks, newopts = filterfn(ui, originalchunks, match)
323 except error.PatchError as err:
326 except error.PatchError as err:
324 raise error.Abort(_('error parsing patch: %s') % err)
327 raise error.Abort(_('error parsing patch: %s') % err)
325 opts.update(newopts)
328 opts.update(newopts)
326
329
327 # We need to keep a backup of files that have been newly added and
330 # We need to keep a backup of files that have been newly added and
328 # modified during the recording process because there is a previous
331 # modified during the recording process because there is a previous
329 # version without the edit in the workdir
332 # version without the edit in the workdir. We also will need to restore
330 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
333 # files that were the sources of renames so that the patch application
334 # works.
335 newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks,
336 originalchunks)
331 contenders = set()
337 contenders = set()
332 for h in chunks:
338 for h in chunks:
333 try:
339 try:
334 contenders.update(set(h.files()))
340 contenders.update(set(h.files()))
335 except AttributeError:
341 except AttributeError:
336 pass
342 pass
337
343
338 changed = status.modified + status.added + status.removed
344 changed = status.modified + status.added + status.removed
339 newfiles = [f for f in changed if f in contenders]
345 newfiles = [f for f in changed if f in contenders]
340 if not newfiles:
346 if not newfiles:
341 ui.status(_('no changes to record\n'))
347 ui.status(_('no changes to record\n'))
342 return 0
348 return 0
343
349
344 modified = set(status.modified)
350 modified = set(status.modified)
345
351
346 # 2. backup changed files, so we can restore them in the end
352 # 2. backup changed files, so we can restore them in the end
347
353
348 if backupall:
354 if backupall:
349 tobackup = changed
355 tobackup = changed
350 else:
356 else:
351 tobackup = [f for f in newfiles if f in modified or f in
357 tobackup = [f for f in newfiles if f in modified or f in
352 newlyaddedandmodifiedfiles]
358 newlyaddedandmodifiedfiles]
353 backups = {}
359 backups = {}
354 if tobackup:
360 if tobackup:
355 backupdir = repo.vfs.join('record-backups')
361 backupdir = repo.vfs.join('record-backups')
356 try:
362 try:
357 os.mkdir(backupdir)
363 os.mkdir(backupdir)
358 except OSError as err:
364 except OSError as err:
359 if err.errno != errno.EEXIST:
365 if err.errno != errno.EEXIST:
360 raise
366 raise
361 try:
367 try:
362 # backup continues
368 # backup continues
363 for f in tobackup:
369 for f in tobackup:
364 fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
370 fd, tmpname = pycompat.mkstemp(prefix=f.replace('/', '_') + '.',
365 dir=backupdir)
371 dir=backupdir)
366 os.close(fd)
372 os.close(fd)
367 ui.debug('backup %r as %r\n' % (f, tmpname))
373 ui.debug('backup %r as %r\n' % (f, tmpname))
368 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
374 util.copyfile(repo.wjoin(f), tmpname, copystat=True)
369 backups[f] = tmpname
375 backups[f] = tmpname
370
376
371 fp = stringio()
377 fp = stringio()
372 for c in chunks:
378 for c in chunks:
373 fname = c.filename()
379 fname = c.filename()
374 if fname in backups:
380 if fname in backups:
375 c.write(fp)
381 c.write(fp)
376 dopatch = fp.tell()
382 dopatch = fp.tell()
377 fp.seek(0)
383 fp.seek(0)
378
384
379 # 2.5 optionally review / modify patch in text editor
385 # 2.5 optionally review / modify patch in text editor
380 if opts.get('review', False):
386 if opts.get('review', False):
381 patchtext = (crecordmod.diffhelptext
387 patchtext = (crecordmod.diffhelptext
382 + crecordmod.patchhelptext
388 + crecordmod.patchhelptext
383 + fp.read())
389 + fp.read())
384 reviewedpatch = ui.edit(patchtext, "",
390 reviewedpatch = ui.edit(patchtext, "",
385 action="diff",
391 action="diff",
386 repopath=repo.path)
392 repopath=repo.path)
387 fp.truncate(0)
393 fp.truncate(0)
388 fp.write(reviewedpatch)
394 fp.write(reviewedpatch)
389 fp.seek(0)
395 fp.seek(0)
390
396
391 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
397 [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
392 # 3a. apply filtered patch to clean repo (clean)
398 # 3a. apply filtered patch to clean repo (clean)
393 if backups:
399 if backups:
394 # Equivalent to hg.revert
400 # Equivalent to hg.revert
395 m = scmutil.matchfiles(repo, backups.keys())
401 m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
396 mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
402 mergemod.update(repo, repo.dirstate.p1(), branchmerge=False,
397 force=True, matcher=m)
403 force=True, matcher=m)
398
404
399 # 3b. (apply)
405 # 3b. (apply)
400 if dopatch:
406 if dopatch:
401 try:
407 try:
402 ui.debug('applying patch\n')
408 ui.debug('applying patch\n')
403 ui.debug(fp.getvalue())
409 ui.debug(fp.getvalue())
404 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
410 patch.internalpatch(ui, repo, fp, 1, eolmode=None)
405 except error.PatchError as err:
411 except error.PatchError as err:
406 raise error.Abort(pycompat.bytestr(err))
412 raise error.Abort(pycompat.bytestr(err))
407 del fp
413 del fp
408
414
409 # 4. We prepared working directory according to filtered
415 # 4. We prepared working directory according to filtered
410 # patch. Now is the time to delegate the job to
416 # patch. Now is the time to delegate the job to
411 # commit/qrefresh or the like!
417 # commit/qrefresh or the like!
412
418
413 # Make all of the pathnames absolute.
419 # Make all of the pathnames absolute.
414 newfiles = [repo.wjoin(nf) for nf in newfiles]
420 newfiles = [repo.wjoin(nf) for nf in newfiles]
415 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
421 return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
416 finally:
422 finally:
417 # 5. finally restore backed-up files
423 # 5. finally restore backed-up files
418 try:
424 try:
419 dirstate = repo.dirstate
425 dirstate = repo.dirstate
420 for realname, tmpname in backups.iteritems():
426 for realname, tmpname in backups.iteritems():
421 ui.debug('restoring %r to %r\n' % (tmpname, realname))
427 ui.debug('restoring %r to %r\n' % (tmpname, realname))
422
428
423 if dirstate[realname] == 'n':
429 if dirstate[realname] == 'n':
424 # without normallookup, restoring timestamp
430 # without normallookup, restoring timestamp
425 # may cause partially committed files
431 # may cause partially committed files
426 # to be treated as unmodified
432 # to be treated as unmodified
427 dirstate.normallookup(realname)
433 dirstate.normallookup(realname)
428
434
429 # copystat=True here and above are a hack to trick any
435 # copystat=True here and above are a hack to trick any
430 # editors that have f open into thinking we haven't modified them.
436 # editors that have f open into thinking we haven't modified them.
431 #
437 #
432 # Also note that this is racy, as an editor could notice the
438 # Also note that this is racy, as an editor could notice the
433 # file's mtime before we've finished writing it.
439 # file's mtime before we've finished writing it.
434 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
440 util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
435 os.unlink(tmpname)
441 os.unlink(tmpname)
436 if tobackup:
442 if tobackup:
437 os.rmdir(backupdir)
443 os.rmdir(backupdir)
438 except OSError:
444 except OSError:
439 pass
445 pass
440
446
441 def recordinwlock(ui, repo, message, match, opts):
447 def recordinwlock(ui, repo, message, match, opts):
442 with repo.wlock():
448 with repo.wlock():
443 return recordfunc(ui, repo, message, match, opts)
449 return recordfunc(ui, repo, message, match, opts)
444
450
445 return commit(ui, repo, recordinwlock, pats, opts)
451 return commit(ui, repo, recordinwlock, pats, opts)
446
452
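For orientation, a toy, repository-free model of the backup/apply/restore dance recordfunc() performs in steps 2, 3 and 5 above. The helper below is hypothetical (it is not Mercurial API): changed files are copied aside, the supplied callback stands in for applying and committing the filtered patch, and the full working-copy versions are put back afterwards so unrecorded edits survive.

import os
import shutil
import tempfile

def with_backups(paths, apply_and_commit):
    """Toy model: back up 'paths', run the callback, then restore the
    original file contents (mirroring steps 2 and 5 of recordfunc)."""
    backups = {}
    try:
        for p in paths:
            fd, tmp = tempfile.mkstemp(prefix=os.path.basename(p) + '.')
            os.close(fd)
            shutil.copy2(p, tmp)      # like util.copyfile(..., copystat=True)
            backups[p] = tmp
        apply_and_commit()            # stands in for steps 3a/3b/4 above
    finally:
        for p, tmp in backups.items():
            shutil.copy2(tmp, p)      # step 5: bring back unrecorded edits
            os.unlink(tmp)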
447 class dirnode(object):
453 class dirnode(object):
448 """
454 """
449 Represent a directory in user working copy with information required for
455 Represent a directory in user working copy with information required for
450 the purpose of tersing its status.
456 the purpose of tersing its status.
451
457
452 path is the path to the directory, without a trailing '/'
458 path is the path to the directory, without a trailing '/'
453
459
454 statuses is a set of statuses of all files in this directory (this includes
460 statuses is a set of statuses of all files in this directory (this includes
455 all the files in all the subdirectories too)
461 all the files in all the subdirectories too)
456
462
457 files is a list of files which are direct children of this directory
463 files is a list of files which are direct children of this directory
458
464
459 subdirs is a dictionary of sub-directory name as the key and its own
465 subdirs is a dictionary of sub-directory name as the key and its own
460 dirnode object as the value
466 dirnode object as the value
461 """
467 """
462
468
463 def __init__(self, dirpath):
469 def __init__(self, dirpath):
464 self.path = dirpath
470 self.path = dirpath
465 self.statuses = set()
471 self.statuses = set()
466 self.files = []
472 self.files = []
467 self.subdirs = {}
473 self.subdirs = {}
468
474
469 def _addfileindir(self, filename, status):
475 def _addfileindir(self, filename, status):
470 """Add a file in this directory as a direct child."""
476 """Add a file in this directory as a direct child."""
471 self.files.append((filename, status))
477 self.files.append((filename, status))
472
478
473 def addfile(self, filename, status):
479 def addfile(self, filename, status):
474 """
480 """
475 Add a file to this directory or to its direct parent directory.
481 Add a file to this directory or to its direct parent directory.
476
482
477 If the file is not a direct child of this directory, we traverse to the
483 If the file is not a direct child of this directory, we traverse to the
478 directory of which this file is a direct child and add the file
484 directory of which this file is a direct child and add the file
479 there.
485 there.
480 """
486 """
481
487
482 # the filename contains a path separator, so it's not a direct
488 # the filename contains a path separator, so it's not a direct
483 # child of this directory
489 # child of this directory
484 if '/' in filename:
490 if '/' in filename:
485 subdir, filep = filename.split('/', 1)
491 subdir, filep = filename.split('/', 1)
486
492
487 # does the dirnode object for subdir exist
493 # does the dirnode object for subdir exist
488 if subdir not in self.subdirs:
494 if subdir not in self.subdirs:
489 subdirpath = pathutil.join(self.path, subdir)
495 subdirpath = pathutil.join(self.path, subdir)
490 self.subdirs[subdir] = dirnode(subdirpath)
496 self.subdirs[subdir] = dirnode(subdirpath)
491
497
492 # try adding the file in subdir
498 # try adding the file in subdir
493 self.subdirs[subdir].addfile(filep, status)
499 self.subdirs[subdir].addfile(filep, status)
494
500
495 else:
501 else:
496 self._addfileindir(filename, status)
502 self._addfileindir(filename, status)
497
503
498 if status not in self.statuses:
504 if status not in self.statuses:
499 self.statuses.add(status)
505 self.statuses.add(status)
500
506
501 def iterfilepaths(self):
507 def iterfilepaths(self):
502 """Yield (status, path) for files directly under this directory."""
508 """Yield (status, path) for files directly under this directory."""
503 for f, st in self.files:
509 for f, st in self.files:
504 yield st, pathutil.join(self.path, f)
510 yield st, pathutil.join(self.path, f)
505
511
506 def tersewalk(self, terseargs):
512 def tersewalk(self, terseargs):
507 """
513 """
508 Yield (status, path) obtained by processing the status of this
514 Yield (status, path) obtained by processing the status of this
509 dirnode.
515 dirnode.
510
516
511 terseargs is the string of arguments passed by the user with `--terse`
517 terseargs is the string of arguments passed by the user with `--terse`
512 flag.
518 flag.
513
519
514 Following are the cases which can happen:
520 Following are the cases which can happen:
515
521
516 1) All the files in the directory (including all the files in its
522 1) All the files in the directory (including all the files in its
517 subdirectories) share the same status and the user has asked us to terse
523 subdirectories) share the same status and the user has asked us to terse
518 that status. -> yield (status, dirpath). dirpath will end in '/'.
524 that status. -> yield (status, dirpath). dirpath will end in '/'.
519
525
520 2) Otherwise, we do the following:
526 2) Otherwise, we do the following:
521
527
522 a) Yield (status, filepath) for all the files which are in this
528 a) Yield (status, filepath) for all the files which are in this
523 directory (only the ones in this directory, not the subdirs)
529 directory (only the ones in this directory, not the subdirs)
524
530
525 b) Recurse the function on all the subdirectories of this
531 b) Recurse the function on all the subdirectories of this
526 directory
532 directory
527 """
533 """
528
534
529 if len(self.statuses) == 1:
535 if len(self.statuses) == 1:
530 onlyst = self.statuses.pop()
536 onlyst = self.statuses.pop()
531
537
532 # Making sure we terse only when the status abbreviation is
538 # Making sure we terse only when the status abbreviation is
533 # passed as terse argument
539 # passed as terse argument
534 if onlyst in terseargs:
540 if onlyst in terseargs:
535 yield onlyst, self.path + '/'
541 yield onlyst, self.path + '/'
536 return
542 return
537
543
538 # add the files to status list
544 # add the files to status list
539 for st, fpath in self.iterfilepaths():
545 for st, fpath in self.iterfilepaths():
540 yield st, fpath
546 yield st, fpath
541
547
542 #recurse on the subdirs
548 #recurse on the subdirs
543 for dirobj in self.subdirs.values():
549 for dirobj in self.subdirs.values():
544 for st, fpath in dirobj.tersewalk(terseargs):
550 for st, fpath in dirobj.tersewalk(terseargs):
545 yield st, fpath
551 yield st, fpath
546
552
547 def tersedir(statuslist, terseargs):
553 def tersedir(statuslist, terseargs):
548 """
554 """
549 Terse the status if all the files in a directory share the same status.
555 Terse the status if all the files in a directory share the same status.
550
556
551 statuslist is scmutil.status() object which contains a list of files for
557 statuslist is scmutil.status() object which contains a list of files for
552 each status.
558 each status.
553 terseargs is the string which is passed by the user as the argument to the `--terse`
559 terseargs is the string which is passed by the user as the argument to the `--terse`
554 flag.
560 flag.
555
561
556 The function makes a tree of objects of dirnode class, and at each node it
562 The function makes a tree of objects of dirnode class, and at each node it
557 stores the information required to know whether we can terse a certain
563 stores the information required to know whether we can terse a certain
558 directory or not.
564 directory or not.
559 """
565 """
560 # the order matters here as that is used to produce final list
566 # the order matters here as that is used to produce final list
561 allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')
567 allst = ('m', 'a', 'r', 'd', 'u', 'i', 'c')
562
568
563 # checking the argument validity
569 # checking the argument validity
564 for s in pycompat.bytestr(terseargs):
570 for s in pycompat.bytestr(terseargs):
565 if s not in allst:
571 if s not in allst:
566 raise error.Abort(_("'%s' not recognized") % s)
572 raise error.Abort(_("'%s' not recognized") % s)
567
573
568 # creating a dirnode object for the root of the repo
574 # creating a dirnode object for the root of the repo
569 rootobj = dirnode('')
575 rootobj = dirnode('')
570 pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
576 pstatus = ('modified', 'added', 'deleted', 'clean', 'unknown',
571 'ignored', 'removed')
577 'ignored', 'removed')
572
578
573 tersedict = {}
579 tersedict = {}
574 for attrname in pstatus:
580 for attrname in pstatus:
575 statuschar = attrname[0:1]
581 statuschar = attrname[0:1]
576 for f in getattr(statuslist, attrname):
582 for f in getattr(statuslist, attrname):
577 rootobj.addfile(f, statuschar)
583 rootobj.addfile(f, statuschar)
578 tersedict[statuschar] = []
584 tersedict[statuschar] = []
579
585
580 # we won't be tersing the root dir, so add files in it
586 # we won't be tersing the root dir, so add files in it
581 for st, fpath in rootobj.iterfilepaths():
587 for st, fpath in rootobj.iterfilepaths():
582 tersedict[st].append(fpath)
588 tersedict[st].append(fpath)
583
589
584 # process each sub-directory and build tersedict
590 # process each sub-directory and build tersedict
585 for subdir in rootobj.subdirs.values():
591 for subdir in rootobj.subdirs.values():
586 for st, f in subdir.tersewalk(terseargs):
592 for st, f in subdir.tersewalk(terseargs):
587 tersedict[st].append(f)
593 tersedict[st].append(f)
588
594
589 tersedlist = []
595 tersedlist = []
590 for st in allst:
596 for st in allst:
591 tersedict[st].sort()
597 tersedict[st].sort()
592 tersedlist.append(tersedict[st])
598 tersedlist.append(tersedict[st])
593
599
594 return tersedlist
600 return tersedlist
595
601
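The following standalone sketch uses hypothetical inputs and handles a single directory level only, so it is a deliberate simplification of the dirnode tree above; it shows the tersing rule in isolation: a directory collapses to 'dir/' only when every file beneath it carries the same status and that status letter was requested via --terse.

import collections

def terse_paths(files_by_status, terseargs):
    # files_by_status maps a status letter to the paths having that status.
    dirstatuses = collections.defaultdict(set)
    for st, paths in files_by_status.items():
        for p in paths:
            d = p.split('/', 1)[0] if '/' in p else ''
            dirstatuses[d].add(st)
    out = collections.defaultdict(list)
    for st, paths in files_by_status.items():
        for p in paths:
            d = p.split('/', 1)[0] if '/' in p else ''
            if d and dirstatuses[d] == {st} and st in terseargs:
                if d + '/' not in out[st]:
                    out[st].append(d + '/')   # collapse the whole directory
            else:
                out[st].append(p)             # keep the individual file
    return {st: sorted(ps) for st, ps in out.items()}

# terse_paths({'u': ['newdir/a', 'newdir/b'], 'm': ['src/x'], 'c': ['src/y']}, 'u')
# -> {'u': ['newdir/'], 'm': ['src/x'], 'c': ['src/y']}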
596 def _commentlines(raw):
602 def _commentlines(raw):
597 '''Surround lines with a comment char and a new line'''
603 '''Surround lines with a comment char and a new line'''
598 lines = raw.splitlines()
604 lines = raw.splitlines()
599 commentedlines = ['# %s' % line for line in lines]
605 commentedlines = ['# %s' % line for line in lines]
600 return '\n'.join(commentedlines) + '\n'
606 return '\n'.join(commentedlines) + '\n'
601
607
602 def _conflictsmsg(repo):
608 def _conflictsmsg(repo):
603 mergestate = mergemod.mergestate.read(repo)
609 mergestate = mergemod.mergestate.read(repo)
604 if not mergestate.active():
610 if not mergestate.active():
605 return
611 return
606
612
607 m = scmutil.match(repo[None])
613 m = scmutil.match(repo[None])
608 unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
614 unresolvedlist = [f for f in mergestate.unresolved() if m(f)]
609 if unresolvedlist:
615 if unresolvedlist:
610 mergeliststr = '\n'.join(
616 mergeliststr = '\n'.join(
611 [' %s' % util.pathto(repo.root, encoding.getcwd(), path)
617 [' %s' % util.pathto(repo.root, encoding.getcwd(), path)
612 for path in sorted(unresolvedlist)])
618 for path in sorted(unresolvedlist)])
613 msg = _('''Unresolved merge conflicts:
619 msg = _('''Unresolved merge conflicts:
614
620
615 %s
621 %s
616
622
617 To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
623 To mark files as resolved: hg resolve --mark FILE''') % mergeliststr
618 else:
624 else:
619 msg = _('No unresolved merge conflicts.')
625 msg = _('No unresolved merge conflicts.')
620
626
621 return _commentlines(msg)
627 return _commentlines(msg)
622
628
623 def morestatus(repo, fm):
629 def morestatus(repo, fm):
624 statetuple = statemod.getrepostate(repo)
630 statetuple = statemod.getrepostate(repo)
625 label = 'status.morestatus'
631 label = 'status.morestatus'
626 if statetuple:
632 if statetuple:
627 state, helpfulmsg = statetuple
633 state, helpfulmsg = statetuple
628 statemsg = _('The repository is in an unfinished *%s* state.') % state
634 statemsg = _('The repository is in an unfinished *%s* state.') % state
629 fm.plain('%s\n' % _commentlines(statemsg), label=label)
635 fm.plain('%s\n' % _commentlines(statemsg), label=label)
630 conmsg = _conflictsmsg(repo)
636 conmsg = _conflictsmsg(repo)
631 if conmsg:
637 if conmsg:
632 fm.plain('%s\n' % conmsg, label=label)
638 fm.plain('%s\n' % conmsg, label=label)
633 if helpfulmsg:
639 if helpfulmsg:
634 fm.plain('%s\n' % _commentlines(helpfulmsg), label=label)
640 fm.plain('%s\n' % _commentlines(helpfulmsg), label=label)
635
641
636 def findpossible(cmd, table, strict=False):
642 def findpossible(cmd, table, strict=False):
637 """
643 """
638 Return cmd -> (aliases, command table entry)
644 Return cmd -> (aliases, command table entry)
639 for each matching command.
645 for each matching command.
640 Return debug commands (or their aliases) only if no normal command matches.
646 Return debug commands (or their aliases) only if no normal command matches.
641 """
647 """
642 choice = {}
648 choice = {}
643 debugchoice = {}
649 debugchoice = {}
644
650
645 if cmd in table:
651 if cmd in table:
646 # short-circuit exact matches, "log" alias beats "log|history"
652 # short-circuit exact matches, "log" alias beats "log|history"
647 keys = [cmd]
653 keys = [cmd]
648 else:
654 else:
649 keys = table.keys()
655 keys = table.keys()
650
656
651 allcmds = []
657 allcmds = []
652 for e in keys:
658 for e in keys:
653 aliases = parsealiases(e)
659 aliases = parsealiases(e)
654 allcmds.extend(aliases)
660 allcmds.extend(aliases)
655 found = None
661 found = None
656 if cmd in aliases:
662 if cmd in aliases:
657 found = cmd
663 found = cmd
658 elif not strict:
664 elif not strict:
659 for a in aliases:
665 for a in aliases:
660 if a.startswith(cmd):
666 if a.startswith(cmd):
661 found = a
667 found = a
662 break
668 break
663 if found is not None:
669 if found is not None:
664 if aliases[0].startswith("debug") or found.startswith("debug"):
670 if aliases[0].startswith("debug") or found.startswith("debug"):
665 debugchoice[found] = (aliases, table[e])
671 debugchoice[found] = (aliases, table[e])
666 else:
672 else:
667 choice[found] = (aliases, table[e])
673 choice[found] = (aliases, table[e])
668
674
669 if not choice and debugchoice:
675 if not choice and debugchoice:
670 choice = debugchoice
676 choice = debugchoice
671
677
672 return choice, allcmds
678 return choice, allcmds
673
679
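A compact standalone sketch (toy command table, not the real one) of the abbreviation rule findpossible() implements: an exact alias match wins, otherwise any alias starting with the typed prefix qualifies, and debug commands only surface when no normal command matched.

table = {
    'status|st': 'status entry',
    'strip': 'strip entry',
    'debugstate': 'debugstate entry',
}

def possible(cmd, table, strict=False):
    choice, debugchoice = {}, {}
    for key, entry in table.items():
        aliases = key.split('|')
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            found = next((a for a in aliases if a.startswith(cmd)), None)
        if found is not None:
            target = debugchoice if aliases[0].startswith('debug') else choice
            target[found] = (aliases, entry)
    return choice or debugchoice

# possible('st', table)      -> {'st': ..., 'strip': ...}   (both prefixes match)
# possible('debugst', table) -> {'debugstate': ...}         (debug-only fallback)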
674 def findcmd(cmd, table, strict=True):
680 def findcmd(cmd, table, strict=True):
675 """Return (aliases, command table entry) for command string."""
681 """Return (aliases, command table entry) for command string."""
676 choice, allcmds = findpossible(cmd, table, strict)
682 choice, allcmds = findpossible(cmd, table, strict)
677
683
678 if cmd in choice:
684 if cmd in choice:
679 return choice[cmd]
685 return choice[cmd]
680
686
681 if len(choice) > 1:
687 if len(choice) > 1:
682 clist = sorted(choice)
688 clist = sorted(choice)
683 raise error.AmbiguousCommand(cmd, clist)
689 raise error.AmbiguousCommand(cmd, clist)
684
690
685 if choice:
691 if choice:
686 return list(choice.values())[0]
692 return list(choice.values())[0]
687
693
688 raise error.UnknownCommand(cmd, allcmds)
694 raise error.UnknownCommand(cmd, allcmds)
689
695
690 def changebranch(ui, repo, revs, label):
696 def changebranch(ui, repo, revs, label):
691 """ Change the branch name of given revs to label """
697 """ Change the branch name of given revs to label """
692
698
693 with repo.wlock(), repo.lock(), repo.transaction('branches'):
699 with repo.wlock(), repo.lock(), repo.transaction('branches'):
694 # abort in case of uncommitted merge or dirty wdir
700 # abort in case of uncommitted merge or dirty wdir
695 bailifchanged(repo)
701 bailifchanged(repo)
696 revs = scmutil.revrange(repo, revs)
702 revs = scmutil.revrange(repo, revs)
697 if not revs:
703 if not revs:
698 raise error.Abort("empty revision set")
704 raise error.Abort("empty revision set")
699 roots = repo.revs('roots(%ld)', revs)
705 roots = repo.revs('roots(%ld)', revs)
700 if len(roots) > 1:
706 if len(roots) > 1:
701 raise error.Abort(_("cannot change branch of non-linear revisions"))
707 raise error.Abort(_("cannot change branch of non-linear revisions"))
702 rewriteutil.precheck(repo, revs, 'change branch of')
708 rewriteutil.precheck(repo, revs, 'change branch of')
703
709
704 root = repo[roots.first()]
710 root = repo[roots.first()]
705 rpb = {parent.branch() for parent in root.parents()}
711 rpb = {parent.branch() for parent in root.parents()}
706 if label not in rpb and label in repo.branchmap():
712 if label not in rpb and label in repo.branchmap():
707 raise error.Abort(_("a branch of the same name already exists"))
713 raise error.Abort(_("a branch of the same name already exists"))
708
714
709 if repo.revs('obsolete() and %ld', revs):
715 if repo.revs('obsolete() and %ld', revs):
710 raise error.Abort(_("cannot change branch of an obsolete changeset"))
716 raise error.Abort(_("cannot change branch of an obsolete changeset"))
711
717
712 # make sure only topological heads
718 # make sure only topological heads
713 if repo.revs('heads(%ld) - head()', revs):
719 if repo.revs('heads(%ld) - head()', revs):
714 raise error.Abort(_("cannot change branch in middle of a stack"))
720 raise error.Abort(_("cannot change branch in middle of a stack"))
715
721
716 replacements = {}
722 replacements = {}
717 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
723 # avoid import cycle mercurial.cmdutil -> mercurial.context ->
718 # mercurial.subrepo -> mercurial.cmdutil
724 # mercurial.subrepo -> mercurial.cmdutil
719 from . import context
725 from . import context
720 for rev in revs:
726 for rev in revs:
721 ctx = repo[rev]
727 ctx = repo[rev]
722 oldbranch = ctx.branch()
728 oldbranch = ctx.branch()
723 # check if ctx has same branch
729 # check if ctx has same branch
724 if oldbranch == label:
730 if oldbranch == label:
725 continue
731 continue
726
732
727 def filectxfn(repo, newctx, path):
733 def filectxfn(repo, newctx, path):
728 try:
734 try:
729 return ctx[path]
735 return ctx[path]
730 except error.ManifestLookupError:
736 except error.ManifestLookupError:
731 return None
737 return None
732
738
733 ui.debug("changing branch of '%s' from '%s' to '%s'\n"
739 ui.debug("changing branch of '%s' from '%s' to '%s'\n"
734 % (hex(ctx.node()), oldbranch, label))
740 % (hex(ctx.node()), oldbranch, label))
735 extra = ctx.extra()
741 extra = ctx.extra()
736 extra['branch_change'] = hex(ctx.node())
742 extra['branch_change'] = hex(ctx.node())
737 # While changing the branch of a set of linear commits, make sure that
743 # While changing the branch of a set of linear commits, make sure that
738 # we base our commits on the new parent rather than the old parent which
744 # we base our commits on the new parent rather than the old parent which
739 # was obsoleted while changing the branch
745 # was obsoleted while changing the branch
740 p1 = ctx.p1().node()
746 p1 = ctx.p1().node()
741 p2 = ctx.p2().node()
747 p2 = ctx.p2().node()
742 if p1 in replacements:
748 if p1 in replacements:
743 p1 = replacements[p1][0]
749 p1 = replacements[p1][0]
744 if p2 in replacements:
750 if p2 in replacements:
745 p2 = replacements[p2][0]
751 p2 = replacements[p2][0]
746
752
747 mc = context.memctx(repo, (p1, p2),
753 mc = context.memctx(repo, (p1, p2),
748 ctx.description(),
754 ctx.description(),
749 ctx.files(),
755 ctx.files(),
750 filectxfn,
756 filectxfn,
751 user=ctx.user(),
757 user=ctx.user(),
752 date=ctx.date(),
758 date=ctx.date(),
753 extra=extra,
759 extra=extra,
754 branch=label)
760 branch=label)
755
761
756 newnode = repo.commitctx(mc)
762 newnode = repo.commitctx(mc)
757 replacements[ctx.node()] = (newnode,)
763 replacements[ctx.node()] = (newnode,)
758 ui.debug('new node id is %s\n' % hex(newnode))
764 ui.debug('new node id is %s\n' % hex(newnode))
759
765
760 # create obsmarkers and move bookmarks
766 # create obsmarkers and move bookmarks
761 scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)
767 scmutil.cleanupnodes(repo, replacements, 'branch-change', fixphase=True)
762
768
763 # move the working copy too
769 # move the working copy too
764 wctx = repo[None]
770 wctx = repo[None]
765 # in-progress merge is a bit too complex for now.
771 # in-progress merge is a bit too complex for now.
766 if len(wctx.parents()) == 1:
772 if len(wctx.parents()) == 1:
767 newid = replacements.get(wctx.p1().node())
773 newid = replacements.get(wctx.p1().node())
768 if newid is not None:
774 if newid is not None:
769 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
775 # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
770 # mercurial.cmdutil
776 # mercurial.cmdutil
771 from . import hg
777 from . import hg
772 hg.update(repo, newid[0], quietempty=True)
778 hg.update(repo, newid[0], quietempty=True)
773
779
774 ui.status(_("changed branch on %d changesets\n") % len(replacements))
780 ui.status(_("changed branch on %d changesets\n") % len(replacements))
775
781
776 def findrepo(p):
782 def findrepo(p):
777 while not os.path.isdir(os.path.join(p, ".hg")):
783 while not os.path.isdir(os.path.join(p, ".hg")):
778 oldp, p = p, os.path.dirname(p)
784 oldp, p = p, os.path.dirname(p)
779 if p == oldp:
785 if p == oldp:
780 return None
786 return None
781
787
782 return p
788 return p
783
789
784 def bailifchanged(repo, merge=True, hint=None):
790 def bailifchanged(repo, merge=True, hint=None):
785 """ enforce the precondition that working directory must be clean.
791 """ enforce the precondition that working directory must be clean.
786
792
787 'merge' can be set to false if a pending uncommitted merge should be
793 'merge' can be set to false if a pending uncommitted merge should be
788 ignored (such as when 'update --check' runs).
794 ignored (such as when 'update --check' runs).
789
795
790 'hint' is the usual hint given to Abort exception.
796 'hint' is the usual hint given to Abort exception.
791 """
797 """
792
798
793 if merge and repo.dirstate.p2() != nullid:
799 if merge and repo.dirstate.p2() != nullid:
794 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
800 raise error.Abort(_('outstanding uncommitted merge'), hint=hint)
795 modified, added, removed, deleted = repo.status()[:4]
801 modified, added, removed, deleted = repo.status()[:4]
796 if modified or added or removed or deleted:
802 if modified or added or removed or deleted:
797 raise error.Abort(_('uncommitted changes'), hint=hint)
803 raise error.Abort(_('uncommitted changes'), hint=hint)
798 ctx = repo[None]
804 ctx = repo[None]
799 for s in sorted(ctx.substate):
805 for s in sorted(ctx.substate):
800 ctx.sub(s).bailifchanged(hint=hint)
806 ctx.sub(s).bailifchanged(hint=hint)
801
807
802 def logmessage(ui, opts):
808 def logmessage(ui, opts):
803 """ get the log message according to -m and -l option """
809 """ get the log message according to -m and -l option """
804 message = opts.get('message')
810 message = opts.get('message')
805 logfile = opts.get('logfile')
811 logfile = opts.get('logfile')
806
812
807 if message and logfile:
813 if message and logfile:
808 raise error.Abort(_('options --message and --logfile are mutually '
814 raise error.Abort(_('options --message and --logfile are mutually '
809 'exclusive'))
815 'exclusive'))
810 if not message and logfile:
816 if not message and logfile:
811 try:
817 try:
812 if isstdiofilename(logfile):
818 if isstdiofilename(logfile):
813 message = ui.fin.read()
819 message = ui.fin.read()
814 else:
820 else:
815 message = '\n'.join(util.readfile(logfile).splitlines())
821 message = '\n'.join(util.readfile(logfile).splitlines())
816 except IOError as inst:
822 except IOError as inst:
817 raise error.Abort(_("can't read commit message '%s': %s") %
823 raise error.Abort(_("can't read commit message '%s': %s") %
818 (logfile, encoding.strtolocal(inst.strerror)))
824 (logfile, encoding.strtolocal(inst.strerror)))
819 return message
825 return message
820
826
821 def mergeeditform(ctxorbool, baseformname):
827 def mergeeditform(ctxorbool, baseformname):
822 """return appropriate editform name (referencing a committemplate)
828 """return appropriate editform name (referencing a committemplate)
823
829
824 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
830 'ctxorbool' is either a ctx to be committed, or a bool indicating whether
825 merging is committed.
831 merging is committed.
826
832
827 This returns baseformname with '.merge' appended if it is a merge,
833 This returns baseformname with '.merge' appended if it is a merge,
828 otherwise '.normal' is appended.
834 otherwise '.normal' is appended.
829 """
835 """
830 if isinstance(ctxorbool, bool):
836 if isinstance(ctxorbool, bool):
831 if ctxorbool:
837 if ctxorbool:
832 return baseformname + ".merge"
838 return baseformname + ".merge"
833 elif len(ctxorbool.parents()) > 1:
839 elif len(ctxorbool.parents()) > 1:
834 return baseformname + ".merge"
840 return baseformname + ".merge"
835
841
836 return baseformname + ".normal"
842 return baseformname + ".normal"
837
843
838 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
844 def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
839 editform='', **opts):
845 editform='', **opts):
840 """get appropriate commit message editor according to '--edit' option
846 """get appropriate commit message editor according to '--edit' option
841
847
842 'finishdesc' is a function to be called with edited commit message
848 'finishdesc' is a function to be called with edited commit message
843 (= 'description' of the new changeset) just after editing, but
849 (= 'description' of the new changeset) just after editing, but
844 before checking empty-ness. It should return actual text to be
850 before checking empty-ness. It should return actual text to be
845 stored into history. This allows changing the description before
851 stored into history. This allows changing the description before
846 storing.
852 storing.
847
853
848 'extramsg' is an extra message to be shown in the editor instead of
854 'extramsg' is an extra message to be shown in the editor instead of
849 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
855 'Leave message empty to abort commit' line. 'HG: ' prefix and EOL
850 is automatically added.
856 is automatically added.
851
857
852 'editform' is a dot-separated list of names, to distinguish
858 'editform' is a dot-separated list of names, to distinguish
853 the purpose of commit text editing.
859 the purpose of commit text editing.
854
860
855 'getcommiteditor' returns 'commitforceeditor' regardless of
861 'getcommiteditor' returns 'commitforceeditor' regardless of
856 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
862 'edit', if one of 'finishdesc' or 'extramsg' is specified, because
857 they are specific for usage in MQ.
863 they are specific for usage in MQ.
858 """
864 """
859 if edit or finishdesc or extramsg:
865 if edit or finishdesc or extramsg:
860 return lambda r, c, s: commitforceeditor(r, c, s,
866 return lambda r, c, s: commitforceeditor(r, c, s,
861 finishdesc=finishdesc,
867 finishdesc=finishdesc,
862 extramsg=extramsg,
868 extramsg=extramsg,
863 editform=editform)
869 editform=editform)
864 elif editform:
870 elif editform:
865 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
871 return lambda r, c, s: commiteditor(r, c, s, editform=editform)
866 else:
872 else:
867 return commiteditor
873 return commiteditor
868
874
869 def _escapecommandtemplate(tmpl):
875 def _escapecommandtemplate(tmpl):
870 parts = []
876 parts = []
871 for typ, start, end in templater.scantemplate(tmpl, raw=True):
877 for typ, start, end in templater.scantemplate(tmpl, raw=True):
872 if typ == b'string':
878 if typ == b'string':
873 parts.append(stringutil.escapestr(tmpl[start:end]))
879 parts.append(stringutil.escapestr(tmpl[start:end]))
874 else:
880 else:
875 parts.append(tmpl[start:end])
881 parts.append(tmpl[start:end])
876 return b''.join(parts)
882 return b''.join(parts)
877
883
878 def rendercommandtemplate(ui, tmpl, props):
884 def rendercommandtemplate(ui, tmpl, props):
879 r"""Expand a literal template 'tmpl' in a way suitable for command line
885 r"""Expand a literal template 'tmpl' in a way suitable for command line
880
886
881 '\' in outermost string is not taken as an escape character because it
887 '\' in outermost string is not taken as an escape character because it
882 is a directory separator on Windows.
888 is a directory separator on Windows.
883
889
884 >>> from . import ui as uimod
890 >>> from . import ui as uimod
885 >>> ui = uimod.ui()
891 >>> ui = uimod.ui()
886 >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
892 >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
887 'c:\\foo'
893 'c:\\foo'
888 >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
894 >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
889 'c:{path}'
895 'c:{path}'
890 """
896 """
891 if not tmpl:
897 if not tmpl:
892 return tmpl
898 return tmpl
893 t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
899 t = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
894 return t.renderdefault(props)
900 return t.renderdefault(props)
895
901
896 def rendertemplate(ctx, tmpl, props=None):
902 def rendertemplate(ctx, tmpl, props=None):
897 """Expand a literal template 'tmpl' byte-string against one changeset
903 """Expand a literal template 'tmpl' byte-string against one changeset
898
904
899 Each props item must be a stringify-able value or a callable returning
905 Each props item must be a stringify-able value or a callable returning
900 such value, i.e. no bare list nor dict should be passed.
906 such value, i.e. no bare list nor dict should be passed.
901 """
907 """
902 repo = ctx.repo()
908 repo = ctx.repo()
903 tres = formatter.templateresources(repo.ui, repo)
909 tres = formatter.templateresources(repo.ui, repo)
904 t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
910 t = formatter.maketemplater(repo.ui, tmpl, defaults=templatekw.keywords,
905 resources=tres)
911 resources=tres)
906 mapping = {'ctx': ctx}
912 mapping = {'ctx': ctx}
907 if props:
913 if props:
908 mapping.update(props)
914 mapping.update(props)
909 return t.renderdefault(mapping)
915 return t.renderdefault(mapping)
910
916
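# Illustrative sketch (not part of the original file): rendertemplate()
# expands a literal template against a single changeset, so a hypothetical
# caller holding a changectx could format a one-line label like this.
def _examplerendertemplate(repo):
    return rendertemplate(repo['.'], '{rev}:{node|short} {desc|firstline}\n')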
911 def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
917 def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
912 r"""Convert old-style filename format string to template string
918 r"""Convert old-style filename format string to template string
913
919
914 >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
920 >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
915 'foo-{reporoot|basename}-{seqno}.patch'
921 'foo-{reporoot|basename}-{seqno}.patch'
916 >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
922 >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
917 '{rev}{tags % "{tag}"}{node}'
923 '{rev}{tags % "{tag}"}{node}'
918
924
919 '\' in outermost strings has to be escaped because it is a directory
925 '\' in outermost strings has to be escaped because it is a directory
920 separator on Windows:
926 separator on Windows:
921
927
922 >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
928 >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
923 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
929 'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
924 >>> _buildfntemplate(b'\\\\foo\\bar.patch')
930 >>> _buildfntemplate(b'\\\\foo\\bar.patch')
925 '\\\\\\\\foo\\\\bar.patch'
931 '\\\\\\\\foo\\\\bar.patch'
926 >>> _buildfntemplate(b'\\{tags % "{tag}"}')
932 >>> _buildfntemplate(b'\\{tags % "{tag}"}')
927 '\\\\{tags % "{tag}"}'
933 '\\\\{tags % "{tag}"}'
928
934
929 but inner strings follow the template rules (i.e. '\' is taken as an
935 but inner strings follow the template rules (i.e. '\' is taken as an
930 escape character):
936 escape character):
931
937
932 >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
938 >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
933 '{"c:\\tmp"}'
939 '{"c:\\tmp"}'
934 """
940 """
935 expander = {
941 expander = {
936 b'H': b'{node}',
942 b'H': b'{node}',
937 b'R': b'{rev}',
943 b'R': b'{rev}',
938 b'h': b'{node|short}',
944 b'h': b'{node|short}',
939 b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
945 b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
940 b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
946 b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
941 b'%': b'%',
947 b'%': b'%',
942 b'b': b'{reporoot|basename}',
948 b'b': b'{reporoot|basename}',
943 }
949 }
944 if total is not None:
950 if total is not None:
945 expander[b'N'] = b'{total}'
951 expander[b'N'] = b'{total}'
946 if seqno is not None:
952 if seqno is not None:
947 expander[b'n'] = b'{seqno}'
953 expander[b'n'] = b'{seqno}'
948 if total is not None and seqno is not None:
954 if total is not None and seqno is not None:
949 expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
955 expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
950 if pathname is not None:
956 if pathname is not None:
951 expander[b's'] = b'{pathname|basename}'
957 expander[b's'] = b'{pathname|basename}'
952 expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
958 expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
953 expander[b'p'] = b'{pathname}'
959 expander[b'p'] = b'{pathname}'
954
960
955 newname = []
961 newname = []
956 for typ, start, end in templater.scantemplate(pat, raw=True):
962 for typ, start, end in templater.scantemplate(pat, raw=True):
957 if typ != b'string':
963 if typ != b'string':
958 newname.append(pat[start:end])
964 newname.append(pat[start:end])
959 continue
965 continue
960 i = start
966 i = start
961 while i < end:
967 while i < end:
962 n = pat.find(b'%', i, end)
968 n = pat.find(b'%', i, end)
963 if n < 0:
969 if n < 0:
964 newname.append(stringutil.escapestr(pat[i:end]))
970 newname.append(stringutil.escapestr(pat[i:end]))
965 break
971 break
966 newname.append(stringutil.escapestr(pat[i:n]))
972 newname.append(stringutil.escapestr(pat[i:n]))
967 if n + 2 > end:
973 if n + 2 > end:
968 raise error.Abort(_("incomplete format spec in output "
974 raise error.Abort(_("incomplete format spec in output "
969 "filename"))
975 "filename"))
970 c = pat[n + 1:n + 2]
976 c = pat[n + 1:n + 2]
971 i = n + 2
977 i = n + 2
972 try:
978 try:
973 newname.append(expander[c])
979 newname.append(expander[c])
974 except KeyError:
980 except KeyError:
975 raise error.Abort(_("invalid format spec '%%%s' in output "
981 raise error.Abort(_("invalid format spec '%%%s' in output "
976 "filename") % c)
982 "filename") % c)
977 return ''.join(newname)
983 return ''.join(newname)
978
984
979 def makefilename(ctx, pat, **props):
985 def makefilename(ctx, pat, **props):
980 if not pat:
986 if not pat:
981 return pat
987 return pat
982 tmpl = _buildfntemplate(pat, **props)
988 tmpl = _buildfntemplate(pat, **props)
983 # BUG: alias expansion shouldn't be made against template fragments
989 # BUG: alias expansion shouldn't be made against template fragments
984 # rewritten from %-format strings, but we have no easy way to partially
990 # rewritten from %-format strings, but we have no easy way to partially
985 # disable the expansion.
991 # disable the expansion.
986 return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
992 return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
987
993
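# Illustrative sketch (not part of the original file): makefilename() expands
# the old-style %-escapes through the template engine.  A hypothetical caller
# producing numbered patch names per exported revision might do:
def _examplemakefilename(repo, rev, seqno, total):
    # with both 'seqno' and 'total' given, %n is zero-padded to the width
    # of 'total', e.g. 'hg-<shortnode>-03.patch' for seqno 3 of 10
    return makefilename(repo[rev], 'hg-%h-%n.patch', seqno=seqno, total=total)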
988 def isstdiofilename(pat):
994 def isstdiofilename(pat):
989 """True if the given pat looks like a filename denoting stdin/stdout"""
995 """True if the given pat looks like a filename denoting stdin/stdout"""
990 return not pat or pat == '-'
996 return not pat or pat == '-'
991
997
992 class _unclosablefile(object):
998 class _unclosablefile(object):
993 def __init__(self, fp):
999 def __init__(self, fp):
994 self._fp = fp
1000 self._fp = fp
995
1001
996 def close(self):
1002 def close(self):
997 pass
1003 pass
998
1004
999 def __iter__(self):
1005 def __iter__(self):
1000 return iter(self._fp)
1006 return iter(self._fp)
1001
1007
1002 def __getattr__(self, attr):
1008 def __getattr__(self, attr):
1003 return getattr(self._fp, attr)
1009 return getattr(self._fp, attr)
1004
1010
1005 def __enter__(self):
1011 def __enter__(self):
1006 return self
1012 return self
1007
1013
1008 def __exit__(self, exc_type, exc_value, exc_tb):
1014 def __exit__(self, exc_type, exc_value, exc_tb):
1009 pass
1015 pass
1010
1016
1011 def makefileobj(ctx, pat, mode='wb', **props):
1017 def makefileobj(ctx, pat, mode='wb', **props):
1012 writable = mode not in ('r', 'rb')
1018 writable = mode not in ('r', 'rb')
1013
1019
1014 if isstdiofilename(pat):
1020 if isstdiofilename(pat):
1015 repo = ctx.repo()
1021 repo = ctx.repo()
1016 if writable:
1022 if writable:
1017 fp = repo.ui.fout
1023 fp = repo.ui.fout
1018 else:
1024 else:
1019 fp = repo.ui.fin
1025 fp = repo.ui.fin
1020 return _unclosablefile(fp)
1026 return _unclosablefile(fp)
1021 fn = makefilename(ctx, pat, **props)
1027 fn = makefilename(ctx, pat, **props)
1022 return open(fn, mode)
1028 return open(fn, mode)
1023
1029
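# Illustrative sketch (not part of the original file): makefileobj() lets a
# hypothetical caller treat '-' (stdio) and real file names uniformly; the
# stdio case comes back wrapped in _unclosablefile so 'with' cannot close it.
def _examplemakefileobj(repo, dest):
    with makefileobj(repo['.'], dest or '-', mode='wb') as fp:
        fp.write(b'example payload\n')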
1024 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1030 def openstorage(repo, cmd, file_, opts, returnrevlog=False):
1025 """opens the changelog, manifest, a filelog or a given revlog"""
1031 """opens the changelog, manifest, a filelog or a given revlog"""
1026 cl = opts['changelog']
1032 cl = opts['changelog']
1027 mf = opts['manifest']
1033 mf = opts['manifest']
1028 dir = opts['dir']
1034 dir = opts['dir']
1029 msg = None
1035 msg = None
1030 if cl and mf:
1036 if cl and mf:
1031 msg = _('cannot specify --changelog and --manifest at the same time')
1037 msg = _('cannot specify --changelog and --manifest at the same time')
1032 elif cl and dir:
1038 elif cl and dir:
1033 msg = _('cannot specify --changelog and --dir at the same time')
1039 msg = _('cannot specify --changelog and --dir at the same time')
1034 elif cl or mf or dir:
1040 elif cl or mf or dir:
1035 if file_:
1041 if file_:
1036 msg = _('cannot specify filename with --changelog or --manifest')
1042 msg = _('cannot specify filename with --changelog or --manifest')
1037 elif not repo:
1043 elif not repo:
1038 msg = _('cannot specify --changelog or --manifest or --dir '
1044 msg = _('cannot specify --changelog or --manifest or --dir '
1039 'without a repository')
1045 'without a repository')
1040 if msg:
1046 if msg:
1041 raise error.Abort(msg)
1047 raise error.Abort(msg)
1042
1048
1043 r = None
1049 r = None
1044 if repo:
1050 if repo:
1045 if cl:
1051 if cl:
1046 r = repo.unfiltered().changelog
1052 r = repo.unfiltered().changelog
1047 elif dir:
1053 elif dir:
1048 if 'treemanifest' not in repo.requirements:
1054 if 'treemanifest' not in repo.requirements:
1049 raise error.Abort(_("--dir can only be used on repos with "
1055 raise error.Abort(_("--dir can only be used on repos with "
1050 "treemanifest enabled"))
1056 "treemanifest enabled"))
1051 if not dir.endswith('/'):
1057 if not dir.endswith('/'):
1052 dir = dir + '/'
1058 dir = dir + '/'
1053 dirlog = repo.manifestlog.getstorage(dir)
1059 dirlog = repo.manifestlog.getstorage(dir)
1054 if len(dirlog):
1060 if len(dirlog):
1055 r = dirlog
1061 r = dirlog
1056 elif mf:
1062 elif mf:
1057 r = repo.manifestlog.getstorage(b'')
1063 r = repo.manifestlog.getstorage(b'')
1058 elif file_:
1064 elif file_:
1059 filelog = repo.file(file_)
1065 filelog = repo.file(file_)
1060 if len(filelog):
1066 if len(filelog):
1061 r = filelog
1067 r = filelog
1062
1068
1063 # Not all storage may be revlogs. If requested, try to return an actual
1069 # Not all storage may be revlogs. If requested, try to return an actual
1064 # revlog instance.
1070 # revlog instance.
1065 if returnrevlog:
1071 if returnrevlog:
1066 if isinstance(r, revlog.revlog):
1072 if isinstance(r, revlog.revlog):
1067 pass
1073 pass
1068 elif util.safehasattr(r, '_revlog'):
1074 elif util.safehasattr(r, '_revlog'):
1069 r = r._revlog
1075 r = r._revlog
1070 elif r is not None:
1076 elif r is not None:
1071 raise error.Abort(_('%r does not appear to be a revlog') % r)
1077 raise error.Abort(_('%r does not appear to be a revlog') % r)
1072
1078
1073 if not r:
1079 if not r:
1074 if not returnrevlog:
1080 if not returnrevlog:
1075 raise error.Abort(_('cannot give path to non-revlog'))
1081 raise error.Abort(_('cannot give path to non-revlog'))
1076
1082
1077 if not file_:
1083 if not file_:
1078 raise error.CommandError(cmd, _('invalid arguments'))
1084 raise error.CommandError(cmd, _('invalid arguments'))
1079 if not os.path.isfile(file_):
1085 if not os.path.isfile(file_):
1080 raise error.Abort(_("revlog '%s' not found") % file_)
1086 raise error.Abort(_("revlog '%s' not found") % file_)
1081 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
1087 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
1082 file_[:-2] + ".i")
1088 file_[:-2] + ".i")
1083 return r
1089 return r
1084
1090
1085 def openrevlog(repo, cmd, file_, opts):
1091 def openrevlog(repo, cmd, file_, opts):
1086 """Obtain a revlog backing storage of an item.
1092 """Obtain a revlog backing storage of an item.
1087
1093
1088 This is similar to ``openstorage()`` except it always returns a revlog.
1094 This is similar to ``openstorage()`` except it always returns a revlog.
1089
1095
1090 In most cases, a caller cares about the main storage object - not the
1096 In most cases, a caller cares about the main storage object - not the
1091 revlog backing it. Therefore, this function should only be used by code
1097 revlog backing it. Therefore, this function should only be used by code
1092 that needs to examine low-level revlog implementation details, e.g. debug
1098 that needs to examine low-level revlog implementation details, e.g. debug
1093 commands.
1099 commands.
1094 """
1100 """
1095 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1101 return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1096
1102
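# Illustrative sketch (not part of the original file): a hypothetical debug
# command asking for the changelog's revlog.  openstorage() indexes the
# option dict directly, so 'changelog', 'manifest' and 'dir' must be present
# even when unused.
def _exampleopenrevlog(repo):
    opts = {'changelog': True, 'manifest': False, 'dir': ''}
    rl = openrevlog(repo, 'debugexample', None, opts)
    return len(rl)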
1097 def copy(ui, repo, pats, opts, rename=False):
1103 def copy(ui, repo, pats, opts, rename=False):
1098 # called with the repo lock held
1104 # called with the repo lock held
1099 #
1105 #
1100 # hgsep => pathname that uses "/" to separate directories
1106 # hgsep => pathname that uses "/" to separate directories
1101 # ossep => pathname that uses os.sep to separate directories
1107 # ossep => pathname that uses os.sep to separate directories
1102 cwd = repo.getcwd()
1108 cwd = repo.getcwd()
1103 targets = {}
1109 targets = {}
1104 after = opts.get("after")
1110 after = opts.get("after")
1105 dryrun = opts.get("dry_run")
1111 dryrun = opts.get("dry_run")
1106 wctx = repo[None]
1112 wctx = repo[None]
1107
1113
1108 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1114 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1109 def walkpat(pat):
1115 def walkpat(pat):
1110 srcs = []
1116 srcs = []
1111 if after:
1117 if after:
1112 badstates = '?'
1118 badstates = '?'
1113 else:
1119 else:
1114 badstates = '?r'
1120 badstates = '?r'
1115 m = scmutil.match(wctx, [pat], opts, globbed=True)
1121 m = scmutil.match(wctx, [pat], opts, globbed=True)
1116 for abs in wctx.walk(m):
1122 for abs in wctx.walk(m):
1117 state = repo.dirstate[abs]
1123 state = repo.dirstate[abs]
1118 rel = uipathfn(abs)
1124 rel = uipathfn(abs)
1119 exact = m.exact(abs)
1125 exact = m.exact(abs)
1120 if state in badstates:
1126 if state in badstates:
1121 if exact and state == '?':
1127 if exact and state == '?':
1122 ui.warn(_('%s: not copying - file is not managed\n') % rel)
1128 ui.warn(_('%s: not copying - file is not managed\n') % rel)
1123 if exact and state == 'r':
1129 if exact and state == 'r':
1124 ui.warn(_('%s: not copying - file has been marked for'
1130 ui.warn(_('%s: not copying - file has been marked for'
1125 ' remove\n') % rel)
1131 ' remove\n') % rel)
1126 continue
1132 continue
1127 # abs: hgsep
1133 # abs: hgsep
1128 # rel: ossep
1134 # rel: ossep
1129 srcs.append((abs, rel, exact))
1135 srcs.append((abs, rel, exact))
1130 return srcs
1136 return srcs
1131
1137
1132 # abssrc: hgsep
1138 # abssrc: hgsep
1133 # relsrc: ossep
1139 # relsrc: ossep
1134 # otarget: ossep
1140 # otarget: ossep
1135 def copyfile(abssrc, relsrc, otarget, exact):
1141 def copyfile(abssrc, relsrc, otarget, exact):
1136 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1142 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1137 if '/' in abstarget:
1143 if '/' in abstarget:
1138 # We cannot normalize abstarget itself, this would prevent
1144 # We cannot normalize abstarget itself, this would prevent
1139 # case only renames, like a => A.
1145 # case only renames, like a => A.
1140 abspath, absname = abstarget.rsplit('/', 1)
1146 abspath, absname = abstarget.rsplit('/', 1)
1141 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
1147 abstarget = repo.dirstate.normalize(abspath) + '/' + absname
1142 reltarget = repo.pathto(abstarget, cwd)
1148 reltarget = repo.pathto(abstarget, cwd)
1143 target = repo.wjoin(abstarget)
1149 target = repo.wjoin(abstarget)
1144 src = repo.wjoin(abssrc)
1150 src = repo.wjoin(abssrc)
1145 state = repo.dirstate[abstarget]
1151 state = repo.dirstate[abstarget]
1146
1152
1147 scmutil.checkportable(ui, abstarget)
1153 scmutil.checkportable(ui, abstarget)
1148
1154
1149 # check for collisions
1155 # check for collisions
1150 prevsrc = targets.get(abstarget)
1156 prevsrc = targets.get(abstarget)
1151 if prevsrc is not None:
1157 if prevsrc is not None:
1152 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1158 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
1153 (reltarget, repo.pathto(abssrc, cwd),
1159 (reltarget, repo.pathto(abssrc, cwd),
1154 repo.pathto(prevsrc, cwd)))
1160 repo.pathto(prevsrc, cwd)))
1155 return True # report a failure
1161 return True # report a failure
1156
1162
1157 # check for overwrites
1163 # check for overwrites
1158 exists = os.path.lexists(target)
1164 exists = os.path.lexists(target)
1159 samefile = False
1165 samefile = False
1160 if exists and abssrc != abstarget:
1166 if exists and abssrc != abstarget:
1161 if (repo.dirstate.normalize(abssrc) ==
1167 if (repo.dirstate.normalize(abssrc) ==
1162 repo.dirstate.normalize(abstarget)):
1168 repo.dirstate.normalize(abstarget)):
1163 if not rename:
1169 if not rename:
1164 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1170 ui.warn(_("%s: can't copy - same file\n") % reltarget)
1165 return True # report a failure
1171 return True # report a failure
1166 exists = False
1172 exists = False
1167 samefile = True
1173 samefile = True
1168
1174
1169 if not after and exists or after and state in 'mn':
1175 if not after and exists or after and state in 'mn':
1170 if not opts['force']:
1176 if not opts['force']:
1171 if state in 'mn':
1177 if state in 'mn':
1172 msg = _('%s: not overwriting - file already committed\n')
1178 msg = _('%s: not overwriting - file already committed\n')
1173 if after:
1179 if after:
1174 flags = '--after --force'
1180 flags = '--after --force'
1175 else:
1181 else:
1176 flags = '--force'
1182 flags = '--force'
1177 if rename:
1183 if rename:
1178 hint = _("('hg rename %s' to replace the file by "
1184 hint = _("('hg rename %s' to replace the file by "
1179 'recording a rename)\n') % flags
1185 'recording a rename)\n') % flags
1180 else:
1186 else:
1181 hint = _("('hg copy %s' to replace the file by "
1187 hint = _("('hg copy %s' to replace the file by "
1182 'recording a copy)\n') % flags
1188 'recording a copy)\n') % flags
1183 else:
1189 else:
1184 msg = _('%s: not overwriting - file exists\n')
1190 msg = _('%s: not overwriting - file exists\n')
1185 if rename:
1191 if rename:
1186 hint = _("('hg rename --after' to record the rename)\n")
1192 hint = _("('hg rename --after' to record the rename)\n")
1187 else:
1193 else:
1188 hint = _("('hg copy --after' to record the copy)\n")
1194 hint = _("('hg copy --after' to record the copy)\n")
1189 ui.warn(msg % reltarget)
1195 ui.warn(msg % reltarget)
1190 ui.warn(hint)
1196 ui.warn(hint)
1191 return True # report a failure
1197 return True # report a failure
1192
1198
1193 if after:
1199 if after:
1194 if not exists:
1200 if not exists:
1195 if rename:
1201 if rename:
1196 ui.warn(_('%s: not recording move - %s does not exist\n') %
1202 ui.warn(_('%s: not recording move - %s does not exist\n') %
1197 (relsrc, reltarget))
1203 (relsrc, reltarget))
1198 else:
1204 else:
1199 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1205 ui.warn(_('%s: not recording copy - %s does not exist\n') %
1200 (relsrc, reltarget))
1206 (relsrc, reltarget))
1201 return True # report a failure
1207 return True # report a failure
1202 elif not dryrun:
1208 elif not dryrun:
1203 try:
1209 try:
1204 if exists:
1210 if exists:
1205 os.unlink(target)
1211 os.unlink(target)
1206 targetdir = os.path.dirname(target) or '.'
1212 targetdir = os.path.dirname(target) or '.'
1207 if not os.path.isdir(targetdir):
1213 if not os.path.isdir(targetdir):
1208 os.makedirs(targetdir)
1214 os.makedirs(targetdir)
1209 if samefile:
1215 if samefile:
1210 tmp = target + "~hgrename"
1216 tmp = target + "~hgrename"
1211 os.rename(src, tmp)
1217 os.rename(src, tmp)
1212 os.rename(tmp, target)
1218 os.rename(tmp, target)
1213 else:
1219 else:
1214 # Preserve stat info on renames, not on copies; this matches
1220 # Preserve stat info on renames, not on copies; this matches
1215 # Linux CLI behavior.
1221 # Linux CLI behavior.
1216 util.copyfile(src, target, copystat=rename)
1222 util.copyfile(src, target, copystat=rename)
1217 srcexists = True
1223 srcexists = True
1218 except IOError as inst:
1224 except IOError as inst:
1219 if inst.errno == errno.ENOENT:
1225 if inst.errno == errno.ENOENT:
1220 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1226 ui.warn(_('%s: deleted in working directory\n') % relsrc)
1221 srcexists = False
1227 srcexists = False
1222 else:
1228 else:
1223 ui.warn(_('%s: cannot copy - %s\n') %
1229 ui.warn(_('%s: cannot copy - %s\n') %
1224 (relsrc, encoding.strtolocal(inst.strerror)))
1230 (relsrc, encoding.strtolocal(inst.strerror)))
1225 return True # report a failure
1231 return True # report a failure
1226
1232
1227 if ui.verbose or not exact:
1233 if ui.verbose or not exact:
1228 if rename:
1234 if rename:
1229 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1235 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
1230 else:
1236 else:
1231 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1237 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
1232
1238
1233 targets[abstarget] = abssrc
1239 targets[abstarget] = abssrc
1234
1240
1235 # fix up dirstate
1241 # fix up dirstate
1236 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1242 scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
1237 dryrun=dryrun, cwd=cwd)
1243 dryrun=dryrun, cwd=cwd)
1238 if rename and not dryrun:
1244 if rename and not dryrun:
1239 if not after and srcexists and not samefile:
1245 if not after and srcexists and not samefile:
1240 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
1246 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
1241 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1247 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1242 wctx.forget([abssrc])
1248 wctx.forget([abssrc])
1243
1249
1244 # pat: ossep
1250 # pat: ossep
1245 # dest: ossep
1251 # dest: ossep
1246 # srcs: list of (hgsep, hgsep, ossep, bool)
1252 # srcs: list of (hgsep, hgsep, ossep, bool)
1247 # return: function that takes hgsep and returns ossep
1253 # return: function that takes hgsep and returns ossep
1248 def targetpathfn(pat, dest, srcs):
1254 def targetpathfn(pat, dest, srcs):
1249 if os.path.isdir(pat):
1255 if os.path.isdir(pat):
1250 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1256 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1251 abspfx = util.localpath(abspfx)
1257 abspfx = util.localpath(abspfx)
1252 if destdirexists:
1258 if destdirexists:
1253 striplen = len(os.path.split(abspfx)[0])
1259 striplen = len(os.path.split(abspfx)[0])
1254 else:
1260 else:
1255 striplen = len(abspfx)
1261 striplen = len(abspfx)
1256 if striplen:
1262 if striplen:
1257 striplen += len(pycompat.ossep)
1263 striplen += len(pycompat.ossep)
1258 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1264 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1259 elif destdirexists:
1265 elif destdirexists:
1260 res = lambda p: os.path.join(dest,
1266 res = lambda p: os.path.join(dest,
1261 os.path.basename(util.localpath(p)))
1267 os.path.basename(util.localpath(p)))
1262 else:
1268 else:
1263 res = lambda p: dest
1269 res = lambda p: dest
1264 return res
1270 return res
1265
1271
1266 # pat: ossep
1272 # pat: ossep
1267 # dest: ossep
1273 # dest: ossep
1268 # srcs: list of (hgsep, hgsep, ossep, bool)
1274 # srcs: list of (hgsep, hgsep, ossep, bool)
1269 # return: function that takes hgsep and returns ossep
1275 # return: function that takes hgsep and returns ossep
1270 def targetpathafterfn(pat, dest, srcs):
1276 def targetpathafterfn(pat, dest, srcs):
1271 if matchmod.patkind(pat):
1277 if matchmod.patkind(pat):
1272 # a mercurial pattern
1278 # a mercurial pattern
1273 res = lambda p: os.path.join(dest,
1279 res = lambda p: os.path.join(dest,
1274 os.path.basename(util.localpath(p)))
1280 os.path.basename(util.localpath(p)))
1275 else:
1281 else:
1276 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1282 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1277 if len(abspfx) < len(srcs[0][0]):
1283 if len(abspfx) < len(srcs[0][0]):
1278 # A directory. Either the target path contains the last
1284 # A directory. Either the target path contains the last
1279 # component of the source path or it does not.
1285 # component of the source path or it does not.
1280 def evalpath(striplen):
1286 def evalpath(striplen):
1281 score = 0
1287 score = 0
1282 for s in srcs:
1288 for s in srcs:
1283 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1289 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1284 if os.path.lexists(t):
1290 if os.path.lexists(t):
1285 score += 1
1291 score += 1
1286 return score
1292 return score
1287
1293
1288 abspfx = util.localpath(abspfx)
1294 abspfx = util.localpath(abspfx)
1289 striplen = len(abspfx)
1295 striplen = len(abspfx)
1290 if striplen:
1296 if striplen:
1291 striplen += len(pycompat.ossep)
1297 striplen += len(pycompat.ossep)
1292 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1298 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1293 score = evalpath(striplen)
1299 score = evalpath(striplen)
1294 striplen1 = len(os.path.split(abspfx)[0])
1300 striplen1 = len(os.path.split(abspfx)[0])
1295 if striplen1:
1301 if striplen1:
1296 striplen1 += len(pycompat.ossep)
1302 striplen1 += len(pycompat.ossep)
1297 if evalpath(striplen1) > score:
1303 if evalpath(striplen1) > score:
1298 striplen = striplen1
1304 striplen = striplen1
1299 res = lambda p: os.path.join(dest,
1305 res = lambda p: os.path.join(dest,
1300 util.localpath(p)[striplen:])
1306 util.localpath(p)[striplen:])
1301 else:
1307 else:
1302 # a file
1308 # a file
1303 if destdirexists:
1309 if destdirexists:
1304 res = lambda p: os.path.join(dest,
1310 res = lambda p: os.path.join(dest,
1305 os.path.basename(util.localpath(p)))
1311 os.path.basename(util.localpath(p)))
1306 else:
1312 else:
1307 res = lambda p: dest
1313 res = lambda p: dest
1308 return res
1314 return res
1309
1315
1310 pats = scmutil.expandpats(pats)
1316 pats = scmutil.expandpats(pats)
1311 if not pats:
1317 if not pats:
1312 raise error.Abort(_('no source or destination specified'))
1318 raise error.Abort(_('no source or destination specified'))
1313 if len(pats) == 1:
1319 if len(pats) == 1:
1314 raise error.Abort(_('no destination specified'))
1320 raise error.Abort(_('no destination specified'))
1315 dest = pats.pop()
1321 dest = pats.pop()
1316 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1322 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1317 if not destdirexists:
1323 if not destdirexists:
1318 if len(pats) > 1 or matchmod.patkind(pats[0]):
1324 if len(pats) > 1 or matchmod.patkind(pats[0]):
1319 raise error.Abort(_('with multiple sources, destination must be an '
1325 raise error.Abort(_('with multiple sources, destination must be an '
1320 'existing directory'))
1326 'existing directory'))
1321 if util.endswithsep(dest):
1327 if util.endswithsep(dest):
1322 raise error.Abort(_('destination %s is not a directory') % dest)
1328 raise error.Abort(_('destination %s is not a directory') % dest)
1323
1329
1324 tfn = targetpathfn
1330 tfn = targetpathfn
1325 if after:
1331 if after:
1326 tfn = targetpathafterfn
1332 tfn = targetpathafterfn
1327 copylist = []
1333 copylist = []
1328 for pat in pats:
1334 for pat in pats:
1329 srcs = walkpat(pat)
1335 srcs = walkpat(pat)
1330 if not srcs:
1336 if not srcs:
1331 continue
1337 continue
1332 copylist.append((tfn(pat, dest, srcs), srcs))
1338 copylist.append((tfn(pat, dest, srcs), srcs))
1333 if not copylist:
1339 if not copylist:
1334 raise error.Abort(_('no files to copy'))
1340 raise error.Abort(_('no files to copy'))
1335
1341
1336 errors = 0
1342 errors = 0
1337 for targetpath, srcs in copylist:
1343 for targetpath, srcs in copylist:
1338 for abssrc, relsrc, exact in srcs:
1344 for abssrc, relsrc, exact in srcs:
1339 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1345 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1340 errors += 1
1346 errors += 1
1341
1347
1342 return errors != 0
1348 return errors != 0
1343
1349
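# Illustrative sketch (not part of the original file): commands such as
# 'hg copy' and 'hg rename' call copy() with the working-copy lock held and
# the parsed patterns, the last of which is the destination.  A hypothetical
# wrapper might look like this (the option keys shown are the ones copy()
# reads above).
def _examplecopy(ui, repo, src, dest, rename=False):
    with repo.wlock():
        # returns True if at least one file could not be copied/renamed
        return copy(ui, repo, [src, dest],
                    {'after': False, 'force': False, 'dry_run': False},
                    rename=rename)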
1344 ## facility to let extensions process additional data into an import patch
1350 ## facility to let extensions process additional data into an import patch
1345 # list of identifiers to be executed in order
1351 # list of identifiers to be executed in order
1346 extrapreimport = [] # run before commit
1352 extrapreimport = [] # run before commit
1347 extrapostimport = [] # run after commit
1353 extrapostimport = [] # run after commit
1348 # mapping from identifier to actual import function
1354 # mapping from identifier to actual import function
1349 #
1355 #
1350 # 'preimport' are run before the commit is made and are provided the following
1356 # 'preimport' are run before the commit is made and are provided the following
1351 # arguments:
1357 # arguments:
1352 # - repo: the localrepository instance,
1358 # - repo: the localrepository instance,
1353 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1359 # - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
1354 # - extra: the future extra dictionary of the changeset, please mutate it,
1360 # - extra: the future extra dictionary of the changeset, please mutate it,
1355 # - opts: the import options.
1361 # - opts: the import options.
1356 # XXX ideally, we would just pass a ctx ready to be computed, that would allow
1362 # XXX ideally, we would just pass a ctx ready to be computed, that would allow
1357 # mutation of in memory commit and more. Feel free to rework the code to get
1363 # mutation of in memory commit and more. Feel free to rework the code to get
1358 # there.
1364 # there.
1359 extrapreimportmap = {}
1365 extrapreimportmap = {}
1360 # 'postimport' are run after the commit is made and are provided the following
1366 # 'postimport' are run after the commit is made and are provided the following
1361 # argument:
1367 # argument:
1362 # - ctx: the changectx created by import.
1368 # - ctx: the changectx created by import.
1363 extrapostimportmap = {}
1369 extrapostimportmap = {}
1364
1370
1365 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1371 def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
1366 """Utility function used by commands.import to import a single patch
1372 """Utility function used by commands.import to import a single patch
1367
1373
1368 This function is explicitly defined here to help the evolve extension to
1374 This function is explicitly defined here to help the evolve extension to
1369 wrap this part of the import logic.
1375 wrap this part of the import logic.
1370
1376
1371 The API is currently a bit ugly because it is a simple code translation from
1377 The API is currently a bit ugly because it is a simple code translation from
1372 the import command. Feel free to make it better.
1378 the import command. Feel free to make it better.
1373
1379
1374 :patchdata: a dictionary containing parsed patch data (such as from
1380 :patchdata: a dictionary containing parsed patch data (such as from
1375 ``patch.extract()``)
1381 ``patch.extract()``)
1376 :parents: nodes that will be the parents of the created commit
1382 :parents: nodes that will be the parents of the created commit
1377 :opts: the full dict of options passed to the import command
1383 :opts: the full dict of options passed to the import command
1378 :msgs: list to save commit message to.
1384 :msgs: list to save commit message to.
1379 (used in case we need to save it when failing)
1385 (used in case we need to save it when failing)
1380 :updatefunc: a function that updates a repo to a given node
1386 :updatefunc: a function that updates a repo to a given node
1381 updatefunc(<repo>, <node>)
1387 updatefunc(<repo>, <node>)
1382 """
1388 """
1383 # avoid cycle context -> subrepo -> cmdutil
1389 # avoid cycle context -> subrepo -> cmdutil
1384 from . import context
1390 from . import context
1385
1391
1386 tmpname = patchdata.get('filename')
1392 tmpname = patchdata.get('filename')
1387 message = patchdata.get('message')
1393 message = patchdata.get('message')
1388 user = opts.get('user') or patchdata.get('user')
1394 user = opts.get('user') or patchdata.get('user')
1389 date = opts.get('date') or patchdata.get('date')
1395 date = opts.get('date') or patchdata.get('date')
1390 branch = patchdata.get('branch')
1396 branch = patchdata.get('branch')
1391 nodeid = patchdata.get('nodeid')
1397 nodeid = patchdata.get('nodeid')
1392 p1 = patchdata.get('p1')
1398 p1 = patchdata.get('p1')
1393 p2 = patchdata.get('p2')
1399 p2 = patchdata.get('p2')
1394
1400
1395 nocommit = opts.get('no_commit')
1401 nocommit = opts.get('no_commit')
1396 importbranch = opts.get('import_branch')
1402 importbranch = opts.get('import_branch')
1397 update = not opts.get('bypass')
1403 update = not opts.get('bypass')
1398 strip = opts["strip"]
1404 strip = opts["strip"]
1399 prefix = opts["prefix"]
1405 prefix = opts["prefix"]
1400 sim = float(opts.get('similarity') or 0)
1406 sim = float(opts.get('similarity') or 0)
1401
1407
1402 if not tmpname:
1408 if not tmpname:
1403 return None, None, False
1409 return None, None, False
1404
1410
1405 rejects = False
1411 rejects = False
1406
1412
1407 cmdline_message = logmessage(ui, opts)
1413 cmdline_message = logmessage(ui, opts)
1408 if cmdline_message:
1414 if cmdline_message:
1409 # pickup the cmdline msg
1415 # pickup the cmdline msg
1410 message = cmdline_message
1416 message = cmdline_message
1411 elif message:
1417 elif message:
1412 # pickup the patch msg
1418 # pickup the patch msg
1413 message = message.strip()
1419 message = message.strip()
1414 else:
1420 else:
1415 # launch the editor
1421 # launch the editor
1416 message = None
1422 message = None
1417 ui.debug('message:\n%s\n' % (message or ''))
1423 ui.debug('message:\n%s\n' % (message or ''))
1418
1424
1419 if len(parents) == 1:
1425 if len(parents) == 1:
1420 parents.append(repo[nullid])
1426 parents.append(repo[nullid])
1421 if opts.get('exact'):
1427 if opts.get('exact'):
1422 if not nodeid or not p1:
1428 if not nodeid or not p1:
1423 raise error.Abort(_('not a Mercurial patch'))
1429 raise error.Abort(_('not a Mercurial patch'))
1424 p1 = repo[p1]
1430 p1 = repo[p1]
1425 p2 = repo[p2 or nullid]
1431 p2 = repo[p2 or nullid]
1426 elif p2:
1432 elif p2:
1427 try:
1433 try:
1428 p1 = repo[p1]
1434 p1 = repo[p1]
1429 p2 = repo[p2]
1435 p2 = repo[p2]
1430 # Without any options, consider p2 only if the
1436 # Without any options, consider p2 only if the
1431 # patch is being applied on top of the recorded
1437 # patch is being applied on top of the recorded
1432 # first parent.
1438 # first parent.
1433 if p1 != parents[0]:
1439 if p1 != parents[0]:
1434 p1 = parents[0]
1440 p1 = parents[0]
1435 p2 = repo[nullid]
1441 p2 = repo[nullid]
1436 except error.RepoError:
1442 except error.RepoError:
1437 p1, p2 = parents
1443 p1, p2 = parents
1438 if p2.node() == nullid:
1444 if p2.node() == nullid:
1439 ui.warn(_("warning: import the patch as a normal revision\n"
1445 ui.warn(_("warning: import the patch as a normal revision\n"
1440 "(use --exact to import the patch as a merge)\n"))
1446 "(use --exact to import the patch as a merge)\n"))
1441 else:
1447 else:
1442 p1, p2 = parents
1448 p1, p2 = parents
1443
1449
1444 n = None
1450 n = None
1445 if update:
1451 if update:
1446 if p1 != parents[0]:
1452 if p1 != parents[0]:
1447 updatefunc(repo, p1.node())
1453 updatefunc(repo, p1.node())
1448 if p2 != parents[1]:
1454 if p2 != parents[1]:
1449 repo.setparents(p1.node(), p2.node())
1455 repo.setparents(p1.node(), p2.node())
1450
1456
1451 if opts.get('exact') or importbranch:
1457 if opts.get('exact') or importbranch:
1452 repo.dirstate.setbranch(branch or 'default')
1458 repo.dirstate.setbranch(branch or 'default')
1453
1459
1454 partial = opts.get('partial', False)
1460 partial = opts.get('partial', False)
1455 files = set()
1461 files = set()
1456 try:
1462 try:
1457 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1463 patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
1458 files=files, eolmode=None, similarity=sim / 100.0)
1464 files=files, eolmode=None, similarity=sim / 100.0)
1459 except error.PatchError as e:
1465 except error.PatchError as e:
1460 if not partial:
1466 if not partial:
1461 raise error.Abort(pycompat.bytestr(e))
1467 raise error.Abort(pycompat.bytestr(e))
1462 if partial:
1468 if partial:
1463 rejects = True
1469 rejects = True
1464
1470
1465 files = list(files)
1471 files = list(files)
1466 if nocommit:
1472 if nocommit:
1467 if message:
1473 if message:
1468 msgs.append(message)
1474 msgs.append(message)
1469 else:
1475 else:
1470 if opts.get('exact') or p2:
1476 if opts.get('exact') or p2:
1471 # If you got here, you either use --force and know what
1477 # If you got here, you either use --force and know what
1472 # you are doing or used --exact or a merge patch while
1478 # you are doing or used --exact or a merge patch while
1473 # being updated to its first parent.
1479 # being updated to its first parent.
1474 m = None
1480 m = None
1475 else:
1481 else:
1476 m = scmutil.matchfiles(repo, files or [])
1482 m = scmutil.matchfiles(repo, files or [])
1477 editform = mergeeditform(repo[None], 'import.normal')
1483 editform = mergeeditform(repo[None], 'import.normal')
1478 if opts.get('exact'):
1484 if opts.get('exact'):
1479 editor = None
1485 editor = None
1480 else:
1486 else:
1481 editor = getcommiteditor(editform=editform,
1487 editor = getcommiteditor(editform=editform,
1482 **pycompat.strkwargs(opts))
1488 **pycompat.strkwargs(opts))
1483 extra = {}
1489 extra = {}
1484 for idfunc in extrapreimport:
1490 for idfunc in extrapreimport:
1485 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
1491 extrapreimportmap[idfunc](repo, patchdata, extra, opts)
1486 overrides = {}
1492 overrides = {}
1487 if partial:
1493 if partial:
1488 overrides[('ui', 'allowemptycommit')] = True
1494 overrides[('ui', 'allowemptycommit')] = True
1489 with repo.ui.configoverride(overrides, 'import'):
1495 with repo.ui.configoverride(overrides, 'import'):
1490 n = repo.commit(message, user,
1496 n = repo.commit(message, user,
1491 date, match=m,
1497 date, match=m,
1492 editor=editor, extra=extra)
1498 editor=editor, extra=extra)
1493 for idfunc in extrapostimport:
1499 for idfunc in extrapostimport:
1494 extrapostimportmap[idfunc](repo[n])
1500 extrapostimportmap[idfunc](repo[n])
1495 else:
1501 else:
1496 if opts.get('exact') or importbranch:
1502 if opts.get('exact') or importbranch:
1497 branch = branch or 'default'
1503 branch = branch or 'default'
1498 else:
1504 else:
1499 branch = p1.branch()
1505 branch = p1.branch()
1500 store = patch.filestore()
1506 store = patch.filestore()
1501 try:
1507 try:
1502 files = set()
1508 files = set()
1503 try:
1509 try:
1504 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1510 patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
1505 files, eolmode=None)
1511 files, eolmode=None)
1506 except error.PatchError as e:
1512 except error.PatchError as e:
1507 raise error.Abort(stringutil.forcebytestr(e))
1513 raise error.Abort(stringutil.forcebytestr(e))
1508 if opts.get('exact'):
1514 if opts.get('exact'):
1509 editor = None
1515 editor = None
1510 else:
1516 else:
1511 editor = getcommiteditor(editform='import.bypass')
1517 editor = getcommiteditor(editform='import.bypass')
1512 memctx = context.memctx(repo, (p1.node(), p2.node()),
1518 memctx = context.memctx(repo, (p1.node(), p2.node()),
1513 message,
1519 message,
1514 files=files,
1520 files=files,
1515 filectxfn=store,
1521 filectxfn=store,
1516 user=user,
1522 user=user,
1517 date=date,
1523 date=date,
1518 branch=branch,
1524 branch=branch,
1519 editor=editor)
1525 editor=editor)
1520 n = memctx.commit()
1526 n = memctx.commit()
1521 finally:
1527 finally:
1522 store.close()
1528 store.close()
1523 if opts.get('exact') and nocommit:
1529 if opts.get('exact') and nocommit:
1524 # --exact with --no-commit is still useful in that it does merge
1530 # --exact with --no-commit is still useful in that it does merge
1525 # and branch bits
1531 # and branch bits
1526 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1532 ui.warn(_("warning: can't check exact import with --no-commit\n"))
1527 elif opts.get('exact') and (not n or hex(n) != nodeid):
1533 elif opts.get('exact') and (not n or hex(n) != nodeid):
1528 raise error.Abort(_('patch is damaged or loses information'))
1534 raise error.Abort(_('patch is damaged or loses information'))
1529 msg = _('applied to working directory')
1535 msg = _('applied to working directory')
1530 if n:
1536 if n:
1531 # i18n: refers to a short changeset id
1537 # i18n: refers to a short changeset id
1532 msg = _('created %s') % short(n)
1538 msg = _('created %s') % short(n)
1533 return msg, n, rejects
1539 return msg, n, rejects
1534
1540
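# Illustrative sketch (not part of the original file): extensions wrapping
# the import logic call tryimportone() once per parsed patch.  'patchdata'
# is assumed to come from patch.extract(), 'opts' must carry at least the
# 'strip' and 'prefix' values of the import command, and updating to a node
# is delegated to the caller-supplied function.
def _exampletryimportone(ui, repo, patchdata, opts):
    from . import hg  # imported here to mirror the cycle avoidance above
    msgs = []
    updatefunc = lambda r, node: hg.clean(r, node)
    return tryimportone(ui, repo, patchdata, repo[None].parents(), opts,
                        msgs, updatefunc)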
1535 # facility to let extensions include additional data in an exported patch
1541 # facility to let extensions include additional data in an exported patch
1536 # list of identifiers to be executed in order
1542 # list of identifiers to be executed in order
1537 extraexport = []
1543 extraexport = []
1538 # mapping from identifier to actual export function
1544 # mapping from identifier to actual export function
1539 # function has to return a string to be added to the header or None
1545 # function has to return a string to be added to the header or None
1540 # it is given two arguments (sequencenumber, changectx)
1546 # it is given two arguments (sequencenumber, changectx)
1541 extraexportmap = {}
1547 extraexportmap = {}
1542
1548
1543 def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
1549 def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
1544 node = scmutil.binnode(ctx)
1550 node = scmutil.binnode(ctx)
1545 parents = [p.node() for p in ctx.parents() if p]
1551 parents = [p.node() for p in ctx.parents() if p]
1546 branch = ctx.branch()
1552 branch = ctx.branch()
1547 if switch_parent:
1553 if switch_parent:
1548 parents.reverse()
1554 parents.reverse()
1549
1555
1550 if parents:
1556 if parents:
1551 prev = parents[0]
1557 prev = parents[0]
1552 else:
1558 else:
1553 prev = nullid
1559 prev = nullid
1554
1560
1555 fm.context(ctx=ctx)
1561 fm.context(ctx=ctx)
1556 fm.plain('# HG changeset patch\n')
1562 fm.plain('# HG changeset patch\n')
1557 fm.write('user', '# User %s\n', ctx.user())
1563 fm.write('user', '# User %s\n', ctx.user())
1558 fm.plain('# Date %d %d\n' % ctx.date())
1564 fm.plain('# Date %d %d\n' % ctx.date())
1559 fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
1565 fm.write('date', '# %s\n', fm.formatdate(ctx.date()))
1560 fm.condwrite(branch and branch != 'default',
1566 fm.condwrite(branch and branch != 'default',
1561 'branch', '# Branch %s\n', branch)
1567 'branch', '# Branch %s\n', branch)
1562 fm.write('node', '# Node ID %s\n', hex(node))
1568 fm.write('node', '# Node ID %s\n', hex(node))
1563 fm.plain('# Parent %s\n' % hex(prev))
1569 fm.plain('# Parent %s\n' % hex(prev))
1564 if len(parents) > 1:
1570 if len(parents) > 1:
1565 fm.plain('# Parent %s\n' % hex(parents[1]))
1571 fm.plain('# Parent %s\n' % hex(parents[1]))
1566 fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))
1572 fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name='node'))
1567
1573
1568 # TODO: redesign extraexportmap function to support formatter
1574 # TODO: redesign extraexportmap function to support formatter
1569 for headerid in extraexport:
1575 for headerid in extraexport:
1570 header = extraexportmap[headerid](seqno, ctx)
1576 header = extraexportmap[headerid](seqno, ctx)
1571 if header is not None:
1577 if header is not None:
1572 fm.plain('# %s\n' % header)
1578 fm.plain('# %s\n' % header)
1573
1579
1574 fm.write('desc', '%s\n', ctx.description().rstrip())
1580 fm.write('desc', '%s\n', ctx.description().rstrip())
1575 fm.plain('\n')
1581 fm.plain('\n')
1576
1582
1577 if fm.isplain():
1583 if fm.isplain():
1578 chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
1584 chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
1579 for chunk, label in chunkiter:
1585 for chunk, label in chunkiter:
1580 fm.plain(chunk, label=label)
1586 fm.plain(chunk, label=label)
1581 else:
1587 else:
1582 chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
1588 chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
1583 # TODO: make it structured?
1589 # TODO: make it structured?
1584 fm.data(diff=b''.join(chunkiter))
1590 fm.data(diff=b''.join(chunkiter))
1585
1591
1586 def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
1592 def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
1587 """Export changesets to stdout or a single file"""
1593 """Export changesets to stdout or a single file"""
1588 for seqno, rev in enumerate(revs, 1):
1594 for seqno, rev in enumerate(revs, 1):
1589 ctx = repo[rev]
1595 ctx = repo[rev]
1590 if not dest.startswith('<'):
1596 if not dest.startswith('<'):
1591 repo.ui.note("%s\n" % dest)
1597 repo.ui.note("%s\n" % dest)
1592 fm.startitem()
1598 fm.startitem()
1593 _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
1599 _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
1594
1600
1595 def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
1601 def _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, diffopts,
1596 match):
1602 match):
1597 """Export changesets to possibly multiple files"""
1603 """Export changesets to possibly multiple files"""
1598 total = len(revs)
1604 total = len(revs)
1599 revwidth = max(len(str(rev)) for rev in revs)
1605 revwidth = max(len(str(rev)) for rev in revs)
1600 filemap = util.sortdict() # filename: [(seqno, rev), ...]
1606 filemap = util.sortdict() # filename: [(seqno, rev), ...]
1601
1607
1602 for seqno, rev in enumerate(revs, 1):
1608 for seqno, rev in enumerate(revs, 1):
1603 ctx = repo[rev]
1609 ctx = repo[rev]
1604 dest = makefilename(ctx, fntemplate,
1610 dest = makefilename(ctx, fntemplate,
1605 total=total, seqno=seqno, revwidth=revwidth)
1611 total=total, seqno=seqno, revwidth=revwidth)
1606 filemap.setdefault(dest, []).append((seqno, rev))
1612 filemap.setdefault(dest, []).append((seqno, rev))
1607
1613
1608 for dest in filemap:
1614 for dest in filemap:
1609 with formatter.maybereopen(basefm, dest) as fm:
1615 with formatter.maybereopen(basefm, dest) as fm:
1610 repo.ui.note("%s\n" % dest)
1616 repo.ui.note("%s\n" % dest)
1611 for seqno, rev in filemap[dest]:
1617 for seqno, rev in filemap[dest]:
1612 fm.startitem()
1618 fm.startitem()
1613 ctx = repo[rev]
1619 ctx = repo[rev]
1614 _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
1620 _exportsingle(repo, ctx, fm, match, switch_parent, seqno,
1615 diffopts)
1621 diffopts)
1616
1622
1617 def _prefetchchangedfiles(repo, revs, match):
1623 def _prefetchchangedfiles(repo, revs, match):
1618 allfiles = set()
1624 allfiles = set()
1619 for rev in revs:
1625 for rev in revs:
1620 for file in repo[rev].files():
1626 for file in repo[rev].files():
1621 if not match or match(file):
1627 if not match or match(file):
1622 allfiles.add(file)
1628 allfiles.add(file)
1623 scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1629 scmutil.prefetchfiles(repo, revs, scmutil.matchfiles(repo, allfiles))
1624
1630
1625 def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
1631 def export(repo, revs, basefm, fntemplate='hg-%h.patch', switch_parent=False,
1626 opts=None, match=None):
1632 opts=None, match=None):
1627 '''export changesets as hg patches
1633 '''export changesets as hg patches
1628
1634
1629 Args:
1635 Args:
1630 repo: The repository from which we're exporting revisions.
1636 repo: The repository from which we're exporting revisions.
1631 revs: A list of revisions to export as revision numbers.
1637 revs: A list of revisions to export as revision numbers.
1632 basefm: A formatter to which patches should be written.
1638 basefm: A formatter to which patches should be written.
1633 fntemplate: An optional string to use for generating patch file names.
1639 fntemplate: An optional string to use for generating patch file names.
1634 switch_parent: If True, show diffs against second parent when not nullid.
1640 switch_parent: If True, show diffs against second parent when not nullid.
1635 Default is false, which always shows diff against p1.
1641 Default is false, which always shows diff against p1.
1636 opts: diff options to use for generating the patch.
1642 opts: diff options to use for generating the patch.
1637 match: If specified, only export changes to files matching this matcher.
1643 match: If specified, only export changes to files matching this matcher.
1638
1644
1639 Returns:
1645 Returns:
1640 Nothing.
1646 Nothing.
1641
1647
1642 Side Effect:
1648 Side Effect:
1643 "HG Changeset Patch" data is emitted to one of the following
1649 "HG Changeset Patch" data is emitted to one of the following
1644 destinations:
1650 destinations:
1645 fntemplate specified: Each rev is written to a unique file named using
1651 fntemplate specified: Each rev is written to a unique file named using
1646 the given template.
1652 the given template.
1647 Otherwise: All revs will be written to basefm.
1653 Otherwise: All revs will be written to basefm.
1648 '''
1654 '''
1649 _prefetchchangedfiles(repo, revs, match)
1655 _prefetchchangedfiles(repo, revs, match)
1650
1656
1651 if not fntemplate:
1657 if not fntemplate:
1652 _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
1658 _exportfile(repo, revs, basefm, '<unnamed>', switch_parent, opts, match)
1653 else:
1659 else:
1654 _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
1660 _exportfntemplate(repo, revs, basefm, fntemplate, switch_parent, opts,
1655 match)
1661 match)
1656
1662
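# Illustrative sketch (not part of the original file): a hypothetical caller
# exporting a set of revisions to per-changeset patch files, roughly
# mirroring what the export command does with a ui formatter and the
# repository's diff options.
def _exampleexport(ui, repo, revs):
    with ui.formatter('export', {}) as basefm:
        export(repo, revs, basefm, fntemplate='hg-%h.patch',
               opts=patch.diffallopts(ui))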
1657 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
1663 def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
1658 """Export changesets to the given file stream"""
1664 """Export changesets to the given file stream"""
1659 _prefetchchangedfiles(repo, revs, match)
1665 _prefetchchangedfiles(repo, revs, match)
1660
1666
1661 dest = getattr(fp, 'name', '<unnamed>')
1667 dest = getattr(fp, 'name', '<unnamed>')
1662 with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
1668 with formatter.formatter(repo.ui, fp, 'export', {}) as fm:
1663 _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
1669 _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
1664
1670
1665 def showmarker(fm, marker, index=None):
1671 def showmarker(fm, marker, index=None):
1666 """utility function to display obsolescence marker in a readable way
1672 """utility function to display obsolescence marker in a readable way
1667
1673
1668 To be used by debug function."""
1674 To be used by debug function."""
1669 if index is not None:
1675 if index is not None:
1670 fm.write('index', '%i ', index)
1676 fm.write('index', '%i ', index)
1671 fm.write('prednode', '%s ', hex(marker.prednode()))
1677 fm.write('prednode', '%s ', hex(marker.prednode()))
1672 succs = marker.succnodes()
1678 succs = marker.succnodes()
1673 fm.condwrite(succs, 'succnodes', '%s ',
1679 fm.condwrite(succs, 'succnodes', '%s ',
1674 fm.formatlist(map(hex, succs), name='node'))
1680 fm.formatlist(map(hex, succs), name='node'))
1675 fm.write('flag', '%X ', marker.flags())
1681 fm.write('flag', '%X ', marker.flags())
1676 parents = marker.parentnodes()
1682 parents = marker.parentnodes()
1677 if parents is not None:
1683 if parents is not None:
1678 fm.write('parentnodes', '{%s} ',
1684 fm.write('parentnodes', '{%s} ',
1679 fm.formatlist(map(hex, parents), name='node', sep=', '))
1685 fm.formatlist(map(hex, parents), name='node', sep=', '))
1680 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
1686 fm.write('date', '(%s) ', fm.formatdate(marker.date()))
1681 meta = marker.metadata().copy()
1687 meta = marker.metadata().copy()
1682 meta.pop('date', None)
1688 meta.pop('date', None)
1683 smeta = pycompat.rapply(pycompat.maybebytestr, meta)
1689 smeta = pycompat.rapply(pycompat.maybebytestr, meta)
1684 fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
1690 fm.write('metadata', '{%s}', fm.formatdict(smeta, fmt='%r: %r', sep=', '))
1685 fm.plain('\n')
1691 fm.plain('\n')
1686
1692
1687 def finddate(ui, repo, date):
1693 def finddate(ui, repo, date):
1688 """Find the tipmost changeset that matches the given date spec"""
1694 """Find the tipmost changeset that matches the given date spec"""
1689
1695
1690 df = dateutil.matchdate(date)
1696 df = dateutil.matchdate(date)
1691 m = scmutil.matchall(repo)
1697 m = scmutil.matchall(repo)
1692 results = {}
1698 results = {}
1693
1699
1694 def prep(ctx, fns):
1700 def prep(ctx, fns):
1695 d = ctx.date()
1701 d = ctx.date()
1696 if df(d[0]):
1702 if df(d[0]):
1697 results[ctx.rev()] = d
1703 results[ctx.rev()] = d
1698
1704
1699 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1705 for ctx in walkchangerevs(repo, m, {'rev': None}, prep):
1700 rev = ctx.rev()
1706 rev = ctx.rev()
1701 if rev in results:
1707 if rev in results:
1702 ui.status(_("found revision %s from %s\n") %
1708 ui.status(_("found revision %s from %s\n") %
1703 (rev, dateutil.datestr(results[rev])))
1709 (rev, dateutil.datestr(results[rev])))
1704 return '%d' % rev
1710 return '%d' % rev
1705
1711
1706 raise error.Abort(_("revision matching date not found"))
1712 raise error.Abort(_("revision matching date not found"))
1707
1713
1708 def increasingwindows(windowsize=8, sizelimit=512):
1714 def increasingwindows(windowsize=8, sizelimit=512):
1709 while True:
1715 while True:
1710 yield windowsize
1716 yield windowsize
1711 if windowsize < sizelimit:
1717 if windowsize < sizelimit:
1712 windowsize *= 2
1718 windowsize *= 2
1713
1719
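# Illustrative note (not part of the original file): the generator above
# doubles the window size until it reaches the limit and then repeats it, so
# the first values drawn are 8, 16, 32, 64, 128, 256, 512, 512, ...
#
#     sizes = increasingwindows()
#     [next(sizes) for _ in range(8)]   # -> [8, 16, 32, 64, 128, 256, 512, 512]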
1714 def _walkrevs(repo, opts):
1720 def _walkrevs(repo, opts):
1715 # Default --rev value depends on --follow but --follow behavior
1721 # Default --rev value depends on --follow but --follow behavior
1716 # depends on revisions resolved from --rev...
1722 # depends on revisions resolved from --rev...
1717 follow = opts.get('follow') or opts.get('follow_first')
1723 follow = opts.get('follow') or opts.get('follow_first')
1718 if opts.get('rev'):
1724 if opts.get('rev'):
1719 revs = scmutil.revrange(repo, opts['rev'])
1725 revs = scmutil.revrange(repo, opts['rev'])
1720 elif follow and repo.dirstate.p1() == nullid:
1726 elif follow and repo.dirstate.p1() == nullid:
1721 revs = smartset.baseset()
1727 revs = smartset.baseset()
1722 elif follow:
1728 elif follow:
1723 revs = repo.revs('reverse(:.)')
1729 revs = repo.revs('reverse(:.)')
1724 else:
1730 else:
1725 revs = smartset.spanset(repo)
1731 revs = smartset.spanset(repo)
1726 revs.reverse()
1732 revs.reverse()
1727 return revs
1733 return revs
1728
1734
class FileWalkError(Exception):
    pass

def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevs(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(_('cannot follow file not in parent '
                                        'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted

class _followfilter(object):
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False

1868 '''Iterate over files and the revs in which they changed.
1874 '''Iterate over files and the revs in which they changed.
1869
1875
1870 Callers most commonly need to iterate backwards over the history
1876 Callers most commonly need to iterate backwards over the history
1871 in which they are interested. Doing so has awful (quadratic-looking)
1877 in which they are interested. Doing so has awful (quadratic-looking)
1872 performance, so we use iterators in a "windowed" way.
1878 performance, so we use iterators in a "windowed" way.
1873
1879
1874 We walk a window of revisions in the desired order. Within the
1880 We walk a window of revisions in the desired order. Within the
1875 window, we first walk forwards to gather data, then in the desired
1881 window, we first walk forwards to gather data, then in the desired
1876 order (usually backwards) to display it.
1882 order (usually backwards) to display it.
1877
1883
1878 This function returns an iterator yielding contexts. Before
1884 This function returns an iterator yielding contexts. Before
1879 yielding each context, the iterator will first call the prepare
1885 yielding each context, the iterator will first call the prepare
1880 function on each context in the window in forward order.'''
1886 function on each context in the window in forward order.'''
1881
1887
1882 allfiles = opts.get('all_files')
1888 allfiles = opts.get('all_files')
1883 follow = opts.get('follow') or opts.get('follow_first')
1889 follow = opts.get('follow') or opts.get('follow_first')
1884 revs = _walkrevs(repo, opts)
1890 revs = _walkrevs(repo, opts)
1885 if not revs:
1891 if not revs:
1886 return []
1892 return []
1887 wanted = set()
1893 wanted = set()
1888 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
1894 slowpath = match.anypats() or (not match.always() and opts.get('removed'))
1889 fncache = {}
1895 fncache = {}
1890 change = repo.__getitem__
1896 change = repo.__getitem__
1891
1897
1892 # First step is to fill wanted, the set of revisions that we want to yield.
1898 # First step is to fill wanted, the set of revisions that we want to yield.
1893 # When it does not induce extra cost, we also fill fncache for revisions in
1899 # When it does not induce extra cost, we also fill fncache for revisions in
1894 # wanted: a cache of filenames that were changed (ctx.files()) and that
1900 # wanted: a cache of filenames that were changed (ctx.files()) and that
1895 # match the file filtering conditions.
1901 # match the file filtering conditions.
1896
1902
1897 if match.always() or allfiles:
1903 if match.always() or allfiles:
1898 # No files, no patterns. Display all revs.
1904 # No files, no patterns. Display all revs.
1899 wanted = revs
1905 wanted = revs
1900 elif not slowpath:
1906 elif not slowpath:
1901 # We only have to read through the filelog to find wanted revisions
1907 # We only have to read through the filelog to find wanted revisions
1902
1908
1903 try:
1909 try:
1904 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1910 wanted = walkfilerevs(repo, match, follow, revs, fncache)
1905 except FileWalkError:
1911 except FileWalkError:
1906 slowpath = True
1912 slowpath = True
1907
1913
1908 # We decided to fall back to the slowpath because at least one
1914 # We decided to fall back to the slowpath because at least one
1909 # of the paths was not a file. Check to see if at least one of them
1915 # of the paths was not a file. Check to see if at least one of them
1910 # existed in history, otherwise simply return
1916 # existed in history, otherwise simply return
1911 for path in match.files():
1917 for path in match.files():
1912 if path == '.' or path in repo.store:
1918 if path == '.' or path in repo.store:
1913 break
1919 break
1914 else:
1920 else:
1915 return []
1921 return []
1916
1922
1917 if slowpath:
1923 if slowpath:
1918 # We have to read the changelog to match filenames against
1924 # We have to read the changelog to match filenames against
1919 # changed files
1925 # changed files
1920
1926
1921 if follow:
1927 if follow:
1922 raise error.Abort(_('can only follow copies/renames for explicit '
1928 raise error.Abort(_('can only follow copies/renames for explicit '
1923 'filenames'))
1929 'filenames'))
1924
1930
1925 # The slow path checks files modified in every changeset.
1931 # The slow path checks files modified in every changeset.
1926 # This is really slow on large repos, so compute the set lazily.
1932 # This is really slow on large repos, so compute the set lazily.
1927 class lazywantedset(object):
1933 class lazywantedset(object):
1928 def __init__(self):
1934 def __init__(self):
1929 self.set = set()
1935 self.set = set()
1930 self.revs = set(revs)
1936 self.revs = set(revs)
1931
1937
1932 # No need to worry about locality here because it will be accessed
1938 # No need to worry about locality here because it will be accessed
1933 # in the same order as the increasing window below.
1939 # in the same order as the increasing window below.
1934 def __contains__(self, value):
1940 def __contains__(self, value):
1935 if value in self.set:
1941 if value in self.set:
1936 return True
1942 return True
1937 elif not value in self.revs:
1943 elif not value in self.revs:
1938 return False
1944 return False
1939 else:
1945 else:
1940 self.revs.discard(value)
1946 self.revs.discard(value)
1941 ctx = change(value)
1947 ctx = change(value)
1942 if allfiles:
1948 if allfiles:
1943 matches = list(ctx.manifest().walk(match))
1949 matches = list(ctx.manifest().walk(match))
1944 else:
1950 else:
1945 matches = [f for f in ctx.files() if match(f)]
1951 matches = [f for f in ctx.files() if match(f)]
1946 if matches:
1952 if matches:
1947 fncache[value] = matches
1953 fncache[value] = matches
1948 self.set.add(value)
1954 self.set.add(value)
1949 return True
1955 return True
1950 return False
1956 return False
1951
1957
1952 def discard(self, value):
1958 def discard(self, value):
1953 self.revs.discard(value)
1959 self.revs.discard(value)
1954 self.set.discard(value)
1960 self.set.discard(value)
1955
1961
1956 wanted = lazywantedset()
1962 wanted = lazywantedset()
1957
1963
1958 # it might be worthwhile to do this in the iterator if the rev range
1964 # it might be worthwhile to do this in the iterator if the rev range
1959 # is descending and the prune args are all within that range
1965 # is descending and the prune args are all within that range
1960 for rev in opts.get('prune', ()):
1966 for rev in opts.get('prune', ()):
1961 rev = repo[rev].rev()
1967 rev = repo[rev].rev()
1962 ff = _followfilter(repo)
1968 ff = _followfilter(repo)
1963 stop = min(revs[0], revs[-1])
1969 stop = min(revs[0], revs[-1])
1964 for x in pycompat.xrange(rev, stop - 1, -1):
1970 for x in pycompat.xrange(rev, stop - 1, -1):
1965 if ff.match(x):
1971 if ff.match(x):
1966 wanted = wanted - [x]
1972 wanted = wanted - [x]
1967
1973
1968 # Now that wanted is correctly initialized, we can iterate over the
1974 # Now that wanted is correctly initialized, we can iterate over the
1969 # revision range, yielding only revisions in wanted.
1975 # revision range, yielding only revisions in wanted.
1970 def iterate():
1976 def iterate():
1971 if follow and match.always():
1977 if follow and match.always():
1972 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1978 ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
1973 def want(rev):
1979 def want(rev):
1974 return ff.match(rev) and rev in wanted
1980 return ff.match(rev) and rev in wanted
1975 else:
1981 else:
1976 def want(rev):
1982 def want(rev):
1977 return rev in wanted
1983 return rev in wanted
1978
1984
1979 it = iter(revs)
1985 it = iter(revs)
1980 stopiteration = False
1986 stopiteration = False
1981 for windowsize in increasingwindows():
1987 for windowsize in increasingwindows():
1982 nrevs = []
1988 nrevs = []
1983 for i in pycompat.xrange(windowsize):
1989 for i in pycompat.xrange(windowsize):
1984 rev = next(it, None)
1990 rev = next(it, None)
1985 if rev is None:
1991 if rev is None:
1986 stopiteration = True
1992 stopiteration = True
1987 break
1993 break
1988 elif want(rev):
1994 elif want(rev):
1989 nrevs.append(rev)
1995 nrevs.append(rev)
1990 for rev in sorted(nrevs):
1996 for rev in sorted(nrevs):
1991 fns = fncache.get(rev)
1997 fns = fncache.get(rev)
1992 ctx = change(rev)
1998 ctx = change(rev)
1993 if not fns:
1999 if not fns:
1994 def fns_generator():
2000 def fns_generator():
1995 if allfiles:
2001 if allfiles:
1996 fiter = iter(ctx)
2002 fiter = iter(ctx)
1997 else:
2003 else:
1998 fiter = ctx.files()
2004 fiter = ctx.files()
1999 for f in fiter:
2005 for f in fiter:
2000 if match(f):
2006 if match(f):
2001 yield f
2007 yield f
2002 fns = fns_generator()
2008 fns = fns_generator()
2003 prepare(ctx, fns)
2009 prepare(ctx, fns)
2004 for rev in nrevs:
2010 for rev in nrevs:
2005 yield change(rev)
2011 yield change(rev)
2006
2012
2007 if stopiteration:
2013 if stopiteration:
2008 break
2014 break
2009
2015
2010 return iterate()
2016 return iterate()
2011
2017
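# Illustrative sketch (not part of upstream cmdutil.py): the calling pattern
# used by log-style commands. prepare(ctx, fns) is invoked while walking
# forward inside each window; fns is the iterable of matched filenames filled
# into fncache above (or regenerated lazily). The helper name is made up, and
# opts is an options dict read with .get(), so missing keys are tolerated.
def _demo_walkchangerevs(ui, repo, match, opts):
    def prepare(ctx, fns):
        # fns: filenames touched by ctx that satisfy `match`
        ui.note('preparing %d: %s\n' % (ctx.rev(), ' '.join(fns)))

    for ctx in walkchangerevs(repo, match, opts, prepare):
        ui.write('%d:%s\n' % (ctx.rev(), ctx))
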
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(dirstate.walk(badmatch, subrepos=sorted(wctx.substate),
                                  unknown=True, ignored=False, full=False)):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % uipathfn(f),
                          label='ui.addremove.added')

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get(r'subrepos'):
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, False,
                                   **opts))
            else:
                bad.extend(sub.add(ui, submatch, subprefix, subuipathfn, True,
                                   **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not opts.get(r'dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad

def addwebdirpath(repo, serverpath, webconf):
    webconf[serverpath] = repo.root
    repo.ui.debug('adding %s = %s\n' % (serverpath, repo.root))

    for r in repo.revs('filelog("path:.hgsub")'):
        ctx = repo[r]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)

def forget(ui, repo, match, prefix, uipathfn, explicitonly, dryrun,
           interactive):
    if dryrun and interactive:
        raise error.Abort(_("cannot specify both --dry-run and --interactive"))
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(submatch, subprefix, subuipathfn,
                                           dryrun=dryrun,
                                           interactive=interactive)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % uipathfn(subpath))

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % uipathfn(f))
                    bad.append(f)

    if interactive:
        responses = _('[Ynsa?]'
                      '$$ &Yes, forget this file'
                      '$$ &No, skip this file'
                      '$$ &Skip remaining files'
                      '$$ Include &all remaining files'
                      '$$ &? (display help)')
        for filename in forget[:]:
            r = ui.promptchoice(_('forget %s %s') %
                                (uipathfn(filename), responses))
            if r == 4: # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write('%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(_('forget %s %s') %
                                        (uipathfn(filename), responses))
            if r == 0: # yes
                continue
            elif r == 1: # no
                forget.remove(filename)
            elif r == 2: # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3: # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

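# Illustrative sketch (not part of upstream cmdutil.py): how a command wires
# up forget(). The matcher and uipathfn construction mirrors what commit() and
# cat() in this module already do; the helper name and the 'README' pattern
# are placeholders, and dryrun=True keeps the sketch side-effect free.
def _demo_forget(ui, repo, pats=('README',)):
    match = scmutil.match(repo[None], pats, {})
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    bad, forgot = forget(ui, repo, match, prefix="", uipathfn=uipathfn,
                         explicitonly=False, dryrun=True, interactive=False)
    return bad, forgot
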
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    ret = 1

    needsfctx = ui.verbose or {'size', 'flags'} & fm.datahint()
    for f in ctx.matches(m):
        fm.startitem()
        fm.context(ctx=ctx)
        if needsfctx:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(path=f)
        fm.plain(fmt % uipathfn(f))
        ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if (subrepos or m.exact(subpath) or any(submatch.files())):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if sub.printfiles(ui, submatch, subuipathfn, fm, fmt,
                                  recurse) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % uipathfn(subpath))

    return ret

def remove(ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun,
           warnings=None):
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(_('searching'), total=len(subs),
                               unit=_('subrepos'))
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(submatch, subprefix, subuipathfn, after,
                                   force, subrepos, dryrun, warnings):
                    ret = 1
            except error.LookupError:
                warnings.append(_("skipping missing subrepository: %s\n")
                                % uipathfn(subpath))
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(_('deleting'), total=len(files),
                               unit=_('files'))
    for f in files:
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + '/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if (f in repo.dirstate or isdir or f == '.'
            or insubrepo() or f in subs):
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(_('not removing %s: no tracked files\n')
                                % uipathfn(f))
            else:
                warnings.append(_('not removing %s: file is untracked\n')
                                % uipathfn(f))
            # missing files will generate a warning elsewhere
            ret = 1
    progress.complete()

    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(_('skipping'), total=len(remaining),
                                   unit=_('files'))
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(_('not removing %s: file still exists\n')
                                % uipathfn(f))
            ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(_('skipping'),
                                   total=(len(modified) + len(added)),
                                   unit=_('files'))
        for f in modified:
            progress.increment()
            warnings.append(_('not removing %s: file is modified (use -f'
                              ' to force removal)\n') % uipathfn(f))
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(_("not removing %s: file has been marked for add"
                              " (use 'hg forget' to undo add)\n") % uipathfn(f))
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(_('deleting'), total=len(list),
                               unit=_('files'))
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(_('removing %s\n') % uipathfn(f),
                      label='ui.addremove.removed')
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue # we never unlink added files on remove
                    rmdir = repo.ui.configbool('experimental',
                                               'removeemptydirs')
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret

def _catfmtneedsdata(fm):
    return not fm.datahint() or 'data' in fm.datahint()

def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # data() can be expensive to fetch (e.g. lfs), so don't fetch it if it
    # wasn't requested.
    data = b''
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write('data', '%s', data)
    fm.data(path=path)

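# Illustrative sketch (not part of upstream cmdutil.py): the docstring above
# says extensions such as lfs wrap _updatecatformatter(). Such a wrapper lives
# in an extension, not in this module; it is installed with
# extensions.wrapfunction() and must call the original first. The extension
# name and the extra 'oid' keyword below are made up.
#
#     from mercurial import cmdutil, extensions
#
#     def _mycatformatter(orig, fm, ctx, matcher, path, decode):
#         orig(fm, ctx, matcher, path, decode)
#         fm.data(oid=b'...')  # hypothetical extra formatter keyword
#
#     def uisetup(ui):
#         extensions.wrapfunction(cmdutil, '_updatecatformatter',
#                                 _mycatformatter)
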
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        filename = None
        if fntemplate:
            filename = makefilename(ctx, fntemplate,
                                    pathname=os.path.join(prefix, path))
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get('decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [ctx.rev()], matcher)
                write(file)
                return 0
        except KeyError:
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [ctx.rev()], matcher)

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(submatch, basefm, fntemplate, subprefix,
                           **pycompat.strkwargs(opts)):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n") %
                      uipathfn(subpath))

    return err

def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    date = opts.get('date')
    if date:
        opts['date'] = dateutil.parsedate(date)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    dsguard = None
    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove'):
        dsguard = dirstateguard.dirstateguard(repo, 'commit')
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, "", uipathfn, opts) != 0:
                raise error.Abort(
                    _("failed to mark all new/missing files as added/removed"))

        return commitfunc(ui, repo, message, matcher, opts)

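# Illustrative sketch (not part of upstream cmdutil.py): commit() defers the
# actual commit to the commitfunc callback, which receives the parsed message
# and matcher. Real commands pass a closure over repo.commit(); the minimal
# stand-in below omits editor/extra handling, and the helper name is made up.
def _demo_commit(ui, repo, pats, opts):
    def commitfunc(ui, repo, message, match, opts):
        return repo.commit(message, opts.get('user'), opts.get('date'), match)

    return commit(ui, repo, commitfunc, pats, opts)
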
def samefile(f, ctx1, ctx2):
    if f in ctx1.manifest():
        a = ctx1.filectx(f)
        if f in ctx2.manifest():
            b = ctx2.filectx(f)
            return (not a.cmp(b)
                    and a.flags() == b.flags())
        else:
            return False
    else:
        return f not in ctx2.manifest()

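# Illustrative sketch (not part of upstream cmdutil.py): samefile() treats a
# file as "same" when content and flags match, or when it is absent from both
# contexts; this is the pruning test amend() applies below when rebuilding its
# file list. The helper name is made up; path must be a repo-relative name.
def _demo_samefile(repo, path):
    wctx = repo[None]   # working copy context
    pctx = repo['.']    # its first parent
    return samefile(path, wctx, pctx)
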
2412 def amend(ui, repo, old, extra, pats, opts):
2418 def amend(ui, repo, old, extra, pats, opts):
2413 # avoid cycle context -> subrepo -> cmdutil
2419 # avoid cycle context -> subrepo -> cmdutil
2414 from . import context
2420 from . import context
2415
2421
2416 # amend will reuse the existing user if not specified, but the obsolete
2422 # amend will reuse the existing user if not specified, but the obsolete
2417 # marker creation requires that the current user's name is specified.
2423 # marker creation requires that the current user's name is specified.
2418 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2424 if obsolete.isenabled(repo, obsolete.createmarkersopt):
2419 ui.username() # raise exception if username not set
2425 ui.username() # raise exception if username not set
2420
2426
2421 ui.note(_('amending changeset %s\n') % old)
2427 ui.note(_('amending changeset %s\n') % old)
2422 base = old.p1()
2428 base = old.p1()
2423
2429
2424 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2430 with repo.wlock(), repo.lock(), repo.transaction('amend'):
2425 # Participating changesets:
2431 # Participating changesets:
2426 #
2432 #
2427 # wctx o - workingctx that contains changes from working copy
2433 # wctx o - workingctx that contains changes from working copy
2428 # | to go into amending commit
2434 # | to go into amending commit
2429 # |
2435 # |
2430 # old o - changeset to amend
2436 # old o - changeset to amend
2431 # |
2437 # |
2432 # base o - first parent of the changeset to amend
2438 # base o - first parent of the changeset to amend
2433 wctx = repo[None]
2439 wctx = repo[None]
2434
2440
2435 # Copy to avoid mutating input
2441 # Copy to avoid mutating input
2436 extra = extra.copy()
2442 extra = extra.copy()
2437 # Update extra dict from amended commit (e.g. to preserve graft
2443 # Update extra dict from amended commit (e.g. to preserve graft
2438 # source)
2444 # source)
2439 extra.update(old.extra())
2445 extra.update(old.extra())
2440
2446
2441 # Also update it from the from the wctx
2447 # Also update it from the from the wctx
2442 extra.update(wctx.extra())
2448 extra.update(wctx.extra())
2443
2449
2444 user = opts.get('user') or old.user()
2450 user = opts.get('user') or old.user()
2445
2451
2446 datemaydiffer = False # date-only change should be ignored?
2452 datemaydiffer = False # date-only change should be ignored?
2447 if opts.get('date') and opts.get('currentdate'):
2453 if opts.get('date') and opts.get('currentdate'):
2448 raise error.Abort(_('--date and --currentdate are mutually '
2454 raise error.Abort(_('--date and --currentdate are mutually '
2449 'exclusive'))
2455 'exclusive'))
2450 if opts.get('date'):
2456 if opts.get('date'):
2451 date = dateutil.parsedate(opts.get('date'))
2457 date = dateutil.parsedate(opts.get('date'))
2452 elif opts.get('currentdate'):
2458 elif opts.get('currentdate'):
2453 date = dateutil.makedate()
2459 date = dateutil.makedate()
2454 elif (ui.configbool('rewrite', 'update-timestamp')
2460 elif (ui.configbool('rewrite', 'update-timestamp')
2455 and opts.get('currentdate') is None):
2461 and opts.get('currentdate') is None):
2456 date = dateutil.makedate()
2462 date = dateutil.makedate()
2457 datemaydiffer = True
2463 datemaydiffer = True
2458 else:
2464 else:
2459 date = old.date()
2465 date = old.date()
2460
2466
2461 if len(old.parents()) > 1:
2467 if len(old.parents()) > 1:
2462 # ctx.files() isn't reliable for merges, so fall back to the
2468 # ctx.files() isn't reliable for merges, so fall back to the
2463 # slower repo.status() method
2469 # slower repo.status() method
2464 files = {fn for st in base.status(old)[:3] for fn in st}
2470 files = {fn for st in base.status(old)[:3] for fn in st}
2465 else:
2471 else:
2466 files = set(old.files())
2472 files = set(old.files())
2467
2473
2468 # add/remove the files to the working copy if the "addremove" option
2474 # add/remove the files to the working copy if the "addremove" option
2469 # was specified.
2475 # was specified.
2470 matcher = scmutil.match(wctx, pats, opts)
2476 matcher = scmutil.match(wctx, pats, opts)
2471 relative = scmutil.anypats(pats, opts)
2477 relative = scmutil.anypats(pats, opts)
2472 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2478 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
2473 if (opts.get('addremove')
2479 if (opts.get('addremove')
2474 and scmutil.addremove(repo, matcher, "", uipathfn, opts)):
2480 and scmutil.addremove(repo, matcher, "", uipathfn, opts)):
2475 raise error.Abort(
2481 raise error.Abort(
2476 _("failed to mark all new/missing files as added/removed"))
2482 _("failed to mark all new/missing files as added/removed"))
2477
2483
2478 # Check subrepos. This depends on in-place wctx._status update in
2484 # Check subrepos. This depends on in-place wctx._status update in
2479 # subrepo.precommit(). To minimize the risk of this hack, we do
2485 # subrepo.precommit(). To minimize the risk of this hack, we do
2480 # nothing if .hgsub does not exist.
2486 # nothing if .hgsub does not exist.
2481 if '.hgsub' in wctx or '.hgsub' in old:
2487 if '.hgsub' in wctx or '.hgsub' in old:
2482 subs, commitsubs, newsubstate = subrepoutil.precommit(
2488 subs, commitsubs, newsubstate = subrepoutil.precommit(
2483 ui, wctx, wctx._status, matcher)
2489 ui, wctx, wctx._status, matcher)
2484 # amend should abort if commitsubrepos is enabled
2490 # amend should abort if commitsubrepos is enabled
2485 assert not commitsubs
2491 assert not commitsubs
2486 if subs:
2492 if subs:
2487 subrepoutil.writestate(repo, newsubstate)
2493 subrepoutil.writestate(repo, newsubstate)
2488
2494
2489 ms = mergemod.mergestate.read(repo)
2495 ms = mergemod.mergestate.read(repo)
2490 mergeutil.checkunresolved(ms)
2496 mergeutil.checkunresolved(ms)
2491
2497
2492 filestoamend = set(f for f in wctx.files() if matcher(f))
2498 filestoamend = set(f for f in wctx.files() if matcher(f))
2493
2499
2494 changes = (len(filestoamend) > 0)
2500 changes = (len(filestoamend) > 0)
2495 if changes:
2501 if changes:
2496 # Recompute copies (avoid recording a -> b -> a)
2502 # Recompute copies (avoid recording a -> b -> a)
2497 copied = copies.pathcopies(base, wctx, matcher)
2503 copied = copies.pathcopies(base, wctx, matcher)
2498 if old.p2:
2504 if old.p2:
2499 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2505 copied.update(copies.pathcopies(old.p2(), wctx, matcher))
2500
2506
2501 # Prune files which were reverted by the updates: if old
2507 # Prune files which were reverted by the updates: if old
2502 # introduced file X and the file was renamed in the working
2508 # introduced file X and the file was renamed in the working
2503 # copy, then those two files are the same and
2509 # copy, then those two files are the same and
2504 # we can discard X from our list of files. Likewise if X
2510 # we can discard X from our list of files. Likewise if X
2505 # was removed, it's no longer relevant. If X is missing (aka
2511 # was removed, it's no longer relevant. If X is missing (aka
2506 # deleted), old X must be preserved.
2512 # deleted), old X must be preserved.
2507 files.update(filestoamend)
2513 files.update(filestoamend)
2508 files = [f for f in files if (f not in filestoamend
2514 files = [f for f in files if (f not in filestoamend
2509 or not samefile(f, wctx, base))]
2515 or not samefile(f, wctx, base))]
2510
2516
2511 def filectxfn(repo, ctx_, path):
2517 def filectxfn(repo, ctx_, path):
2512 try:
2518 try:
2513 # If the file being considered is not amongst the files
2519 # If the file being considered is not amongst the files
2514 # to be amended, we should return the file context from the
2520 # to be amended, we should return the file context from the
2515 # old changeset. This avoids issues when only some files in
2521 # old changeset. This avoids issues when only some files in
2516 # the working copy are being amended but there are also
2522 # the working copy are being amended but there are also
2517 # changes to other files from the old changeset.
2523 # changes to other files from the old changeset.
2518 if path not in filestoamend:
2524 if path not in filestoamend:
2519 return old.filectx(path)
2525 return old.filectx(path)
2520
2526
2521 # Return None for removed files.
2527 # Return None for removed files.
2522 if path in wctx.removed():
2528 if path in wctx.removed():
2523 return None
2529 return None
2524
2530
2525 fctx = wctx[path]
2531 fctx = wctx[path]
2526 flags = fctx.flags()
2532 flags = fctx.flags()
2527 mctx = context.memfilectx(repo, ctx_,
2533 mctx = context.memfilectx(repo, ctx_,
2528 fctx.path(), fctx.data(),
2534 fctx.path(), fctx.data(),
2529 islink='l' in flags,
2535 islink='l' in flags,
2530 isexec='x' in flags,
2536 isexec='x' in flags,
2531 copysource=copied.get(path))
2537 copysource=copied.get(path))
2532 return mctx
2538 return mctx
2533 except KeyError:
2539 except KeyError:
2534 return None
2540 return None
2535 else:
2541 else:
2536 ui.note(_('copying changeset %s to %s\n') % (old, base))
2542 ui.note(_('copying changeset %s to %s\n') % (old, base))
2537
2543
2538 # Use version of files as in the old cset
2544 # Use version of files as in the old cset
2539 def filectxfn(repo, ctx_, path):
2545 def filectxfn(repo, ctx_, path):
2540 try:
2546 try:
2541 return old.filectx(path)
2547 return old.filectx(path)
2542 except KeyError:
2548 except KeyError:
2543 return None
2549 return None
2544
2550
2545 # See if we got a message from -m or -l, if not, open the editor with
2551 # See if we got a message from -m or -l, if not, open the editor with
2546 # the message of the changeset to amend.
2552 # the message of the changeset to amend.
2547 message = logmessage(ui, opts)
2553 message = logmessage(ui, opts)
2548
2554
2549 editform = mergeeditform(old, 'commit.amend')
2555 editform = mergeeditform(old, 'commit.amend')
2550
2556
2551 if not message:
2557 if not message:
2552 message = old.description()
2558 message = old.description()
2553 # Default if message isn't provided and --edit is not passed is to
2559 # Default if message isn't provided and --edit is not passed is to
2554 # invoke editor, but allow --no-edit. If somehow we don't have any
2560 # invoke editor, but allow --no-edit. If somehow we don't have any
2555 # description, let's always start the editor.
2561 # description, let's always start the editor.
2556 doedit = not message or opts.get('edit') in [True, None]
2562 doedit = not message or opts.get('edit') in [True, None]
2557 else:
2563 else:
2558 # Default if message is provided is to not invoke editor, but allow
2564 # Default if message is provided is to not invoke editor, but allow
2559 # --edit.
2565 # --edit.
2560 doedit = opts.get('edit') is True
2566 doedit = opts.get('edit') is True
2561 editor = getcommiteditor(edit=doedit, editform=editform)
2567 editor = getcommiteditor(edit=doedit, editform=editform)
2562
2568
2563 pureextra = extra.copy()
2569 pureextra = extra.copy()
2564 extra['amend_source'] = old.hex()
2570 extra['amend_source'] = old.hex()
2565
2571
2566 new = context.memctx(repo,
2572 new = context.memctx(repo,
2567 parents=[base.node(), old.p2().node()],
2573 parents=[base.node(), old.p2().node()],
2568 text=message,
2574 text=message,
2569 files=files,
2575 files=files,
2570 filectxfn=filectxfn,
2576 filectxfn=filectxfn,
2571 user=user,
2577 user=user,
2572 date=date,
2578 date=date,
2573 extra=extra,
2579 extra=extra,
2574 editor=editor)
2580 editor=editor)
2575
2581
2576 newdesc = changelog.stripdesc(new.description())
2582 newdesc = changelog.stripdesc(new.description())
2577 if ((not changes)
2583 if ((not changes)
2578 and newdesc == old.description()
2584 and newdesc == old.description()
2579 and user == old.user()
2585 and user == old.user()
2580 and (date == old.date() or datemaydiffer)
2586 and (date == old.date() or datemaydiffer)
2581 and pureextra == old.extra()):
2587 and pureextra == old.extra()):
2582 # nothing changed. continuing here would create a new node
2588 # nothing changed. continuing here would create a new node
2583 # anyway because of the amend_source noise.
2589 # anyway because of the amend_source noise.
2584 #
2590 #
2585 # This not what we expect from amend.
2591 # This not what we expect from amend.
2586 return old.node()
2592 return old.node()
2587
2593
2588 commitphase = None
2594 commitphase = None
2589 if opts.get('secret'):
2595 if opts.get('secret'):
2590 commitphase = phases.secret
2596 commitphase = phases.secret
2591 newid = repo.commitctx(new)
2597 newid = repo.commitctx(new)
2592
2598
2593 # Reroute the working copy parent to the new changeset
2599 # Reroute the working copy parent to the new changeset
2594 repo.setparents(newid, nullid)
2600 repo.setparents(newid, nullid)
2595 mapping = {old.node(): (newid,)}
2601 mapping = {old.node(): (newid,)}
2596 obsmetadata = None
2602 obsmetadata = None
2597 if opts.get('note'):
2603 if opts.get('note'):
2598 obsmetadata = {'note': encoding.fromlocal(opts['note'])}
2604 obsmetadata = {'note': encoding.fromlocal(opts['note'])}
2599 backup = ui.configbool('rewrite', 'backup-bundle')
2605 backup = ui.configbool('rewrite', 'backup-bundle')
2600 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
2606 scmutil.cleanupnodes(repo, mapping, 'amend', metadata=obsmetadata,
2601 fixphase=True, targetphase=commitphase,
2607 fixphase=True, targetphase=commitphase,
2602 backup=backup)
2608 backup=backup)
2603
2609
2604 # Fixing the dirstate because localrepo.commitctx does not update
2610 # Fixing the dirstate because localrepo.commitctx does not update
2605 # it. This is rather convenient because we did not need to update
2611 # it. This is rather convenient because we did not need to update
2606 # the dirstate for all the files in the new commit which commitctx
2612 # the dirstate for all the files in the new commit which commitctx
2607 # could have done if it updated the dirstate. Now, we can
2613 # could have done if it updated the dirstate. Now, we can
2608 # selectively update the dirstate only for the amended files.
2614 # selectively update the dirstate only for the amended files.
2609 dirstate = repo.dirstate
2615 dirstate = repo.dirstate
2610
2616
2611 # Update the state of the files which were added and
2617 # Update the state of the files which were added and
2612 # and modified in the amend to "normal" in the dirstate.
2618 # and modified in the amend to "normal" in the dirstate.
2613 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2619 normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
2614 for f in normalfiles:
2620 for f in normalfiles:
2615 dirstate.normal(f)
2621 dirstate.normal(f)
2616
2622
2617 # Update the state of files which were removed in the amend
2623 # Update the state of files which were removed in the amend
2618 # to "removed" in the dirstate.
2624 # to "removed" in the dirstate.
2619 removedfiles = set(wctx.removed()) & filestoamend
2625 removedfiles = set(wctx.removed()) & filestoamend
2620 for f in removedfiles:
2626 for f in removedfiles:
2621 dirstate.drop(f)
2627 dirstate.drop(f)
2622
2628
2623 return newid
2629 return newid
2624
2630
2625 def commiteditor(repo, ctx, subs, editform=''):
2631 def commiteditor(repo, ctx, subs, editform=''):
2626 if ctx.description():
2632 if ctx.description():
2627 return ctx.description()
2633 return ctx.description()
2628 return commitforceeditor(repo, ctx, subs, editform=editform,
2634 return commitforceeditor(repo, ctx, subs, editform=editform,
2629 unchangedmessagedetection=True)
2635 unchangedmessagedetection=True)
2630
2636
2631 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2637 def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
2632 editform='', unchangedmessagedetection=False):
2638 editform='', unchangedmessagedetection=False):
2633 if not extramsg:
2639 if not extramsg:
2634 extramsg = _("Leave message empty to abort commit.")
2640 extramsg = _("Leave message empty to abort commit.")
2635
2641
2636 forms = [e for e in editform.split('.') if e]
2642 forms = [e for e in editform.split('.') if e]
2637 forms.insert(0, 'changeset')
2643 forms.insert(0, 'changeset')
2638 templatetext = None
2644 templatetext = None
2639 while forms:
2645 while forms:
2640 ref = '.'.join(forms)
2646 ref = '.'.join(forms)
2641 if repo.ui.config('committemplate', ref):
2647 if repo.ui.config('committemplate', ref):
2642 templatetext = committext = buildcommittemplate(
2648 templatetext = committext = buildcommittemplate(
2643 repo, ctx, subs, extramsg, ref)
2649 repo, ctx, subs, extramsg, ref)
2644 break
2650 break
2645 forms.pop()
2651 forms.pop()
2646 else:
2652 else:
2647 committext = buildcommittext(repo, ctx, subs, extramsg)
2653 committext = buildcommittext(repo, ctx, subs, extramsg)
2648
2654
2649 # run editor in the repository root
2655 # run editor in the repository root
2650 olddir = encoding.getcwd()
2656 olddir = encoding.getcwd()
2651 os.chdir(repo.root)
2657 os.chdir(repo.root)
2652
2658
2653 # make in-memory changes visible to external process
2659 # make in-memory changes visible to external process
2654 tr = repo.currenttransaction()
2660 tr = repo.currenttransaction()
2655 repo.dirstate.write(tr)
2661 repo.dirstate.write(tr)
2656 pending = tr and tr.writepending() and repo.root
2662 pending = tr and tr.writepending() and repo.root
2657
2663
2658 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2664 editortext = repo.ui.edit(committext, ctx.user(), ctx.extra(),
2659 editform=editform, pending=pending,
2665 editform=editform, pending=pending,
2660 repopath=repo.path, action='commit')
2666 repopath=repo.path, action='commit')
2661 text = editortext
2667 text = editortext
2662
2668
2663 # strip away anything below this special string (used for editors that want
2669 # strip away anything below this special string (used for editors that want
2664 # to display the diff)
2670 # to display the diff)
2665 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
2671 stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
2666 if stripbelow:
2672 if stripbelow:
2667 text = text[:stripbelow.start()]
2673 text = text[:stripbelow.start()]
2668
2674
2669 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2675 text = re.sub("(?m)^HG:.*(\n|$)", "", text)
2670 os.chdir(olddir)
2676 os.chdir(olddir)
2671
2677
2672 if finishdesc:
2678 if finishdesc:
2673 text = finishdesc(text)
2679 text = finishdesc(text)
2674 if not text.strip():
2680 if not text.strip():
2675 raise error.Abort(_("empty commit message"))
2681 raise error.Abort(_("empty commit message"))
2676 if unchangedmessagedetection and editortext == templatetext:
2682 if unchangedmessagedetection and editortext == templatetext:
2677 raise error.Abort(_("commit message unchanged"))
2683 raise error.Abort(_("commit message unchanged"))
2678
2684
2679 return text
2685 return text
2680
2686
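# Hedged sketch (hypothetical helper, not in Mercurial): the loop above tries
# progressively shorter dotted keys under [committemplate], e.g. for editform
# "commit.amend" it looks up "changeset.commit.amend", then "changeset.commit",
# then "changeset", and falls back to the plain text skeleton if none is set.
# A plain dict stands in for the config section here.
def _sketch_pickcommittemplate(committemplate, editform):
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        ref = '.'.join(forms)
        if committemplate.get(ref):  # first configured key wins
            return ref
        forms.pop()
    return None  # no template configured; caller builds the plain text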
2681 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
2687 def buildcommittemplate(repo, ctx, subs, extramsg, ref):
2682 ui = repo.ui
2688 ui = repo.ui
2683 spec = formatter.templatespec(ref, None, None)
2689 spec = formatter.templatespec(ref, None, None)
2684 t = logcmdutil.changesettemplater(ui, repo, spec)
2690 t = logcmdutil.changesettemplater(ui, repo, spec)
2685 t.t.cache.update((k, templater.unquotestring(v))
2691 t.t.cache.update((k, templater.unquotestring(v))
2686 for k, v in repo.ui.configitems('committemplate'))
2692 for k, v in repo.ui.configitems('committemplate'))
2687
2693
2688 if not extramsg:
2694 if not extramsg:
2689 extramsg = '' # ensure that extramsg is string
2695 extramsg = '' # ensure that extramsg is string
2690
2696
2691 ui.pushbuffer()
2697 ui.pushbuffer()
2692 t.show(ctx, extramsg=extramsg)
2698 t.show(ctx, extramsg=extramsg)
2693 return ui.popbuffer()
2699 return ui.popbuffer()
2694
2700
2695 def hgprefix(msg):
2701 def hgprefix(msg):
2696 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2702 return "\n".join(["HG: %s" % a for a in msg.split("\n") if a])
2697
2703
2698 def buildcommittext(repo, ctx, subs, extramsg):
2704 def buildcommittext(repo, ctx, subs, extramsg):
2699 edittext = []
2705 edittext = []
2700 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2706 modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
2701 if ctx.description():
2707 if ctx.description():
2702 edittext.append(ctx.description())
2708 edittext.append(ctx.description())
2703 edittext.append("")
2709 edittext.append("")
2704 edittext.append("") # Empty line between message and comments.
2710 edittext.append("") # Empty line between message and comments.
2705 edittext.append(hgprefix(_("Enter commit message."
2711 edittext.append(hgprefix(_("Enter commit message."
2706 " Lines beginning with 'HG:' are removed.")))
2712 " Lines beginning with 'HG:' are removed.")))
2707 edittext.append(hgprefix(extramsg))
2713 edittext.append(hgprefix(extramsg))
2708 edittext.append("HG: --")
2714 edittext.append("HG: --")
2709 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2715 edittext.append(hgprefix(_("user: %s") % ctx.user()))
2710 if ctx.p2():
2716 if ctx.p2():
2711 edittext.append(hgprefix(_("branch merge")))
2717 edittext.append(hgprefix(_("branch merge")))
2712 if ctx.branch():
2718 if ctx.branch():
2713 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2719 edittext.append(hgprefix(_("branch '%s'") % ctx.branch()))
2714 if bookmarks.isactivewdirparent(repo):
2720 if bookmarks.isactivewdirparent(repo):
2715 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2721 edittext.append(hgprefix(_("bookmark '%s'") % repo._activebookmark))
2716 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2722 edittext.extend([hgprefix(_("subrepo %s") % s) for s in subs])
2717 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2723 edittext.extend([hgprefix(_("added %s") % f) for f in added])
2718 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2724 edittext.extend([hgprefix(_("changed %s") % f) for f in modified])
2719 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2725 edittext.extend([hgprefix(_("removed %s") % f) for f in removed])
2720 if not added and not modified and not removed:
2726 if not added and not modified and not removed:
2721 edittext.append(hgprefix(_("no files changed")))
2727 edittext.append(hgprefix(_("no files changed")))
2722 edittext.append("")
2728 edittext.append("")
2723
2729
2724 return "\n".join(edittext)
2730 return "\n".join(edittext)
2725
2731
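# For orientation only (illustrative, approximate): the skeleton assembled by
# buildcommittext above is what the user sees in the editor, e.g.:
#
#     <existing description, if any>
#
#
#     HG: Enter commit message.  Lines beginning with 'HG:' are removed.
#     HG: Leave message empty to abort commit.
#     HG: --
#     HG: user: Alice <alice@example.com>      (user name is hypothetical)
#     HG: branch 'default'
#     HG: changed foo.py
#     HG: added bar.txt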
2726 def commitstatus(repo, node, branch, bheads=None, opts=None):
2732 def commitstatus(repo, node, branch, bheads=None, opts=None):
2727 if opts is None:
2733 if opts is None:
2728 opts = {}
2734 opts = {}
2729 ctx = repo[node]
2735 ctx = repo[node]
2730 parents = ctx.parents()
2736 parents = ctx.parents()
2731
2737
2732 if (not opts.get('amend') and bheads and node not in bheads and not
2738 if (not opts.get('amend') and bheads and node not in bheads and not
2733 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2739 [x for x in parents if x.node() in bheads and x.branch() == branch]):
2734 repo.ui.status(_('created new head\n'))
2740 repo.ui.status(_('created new head\n'))
2735 # The message is not printed for initial roots. For the other
2741 # The message is not printed for initial roots. For the other
2736 # changesets, it is printed in the following situations:
2742 # changesets, it is printed in the following situations:
2737 #
2743 #
2738 # Par column: for the 2 parents with ...
2744 # Par column: for the 2 parents with ...
2739 # N: null or no parent
2745 # N: null or no parent
2740 # B: parent is on another named branch
2746 # B: parent is on another named branch
2741 # C: parent is a regular non head changeset
2747 # C: parent is a regular non head changeset
2742 # H: parent was a branch head of the current branch
2748 # H: parent was a branch head of the current branch
2743 # Msg column: whether we print "created new head" message
2749 # Msg column: whether we print "created new head" message
2744 # In the following, it is assumed that there already exists some
2750 # In the following, it is assumed that there already exists some
2745 # initial branch heads of the current branch, otherwise nothing is
2751 # initial branch heads of the current branch, otherwise nothing is
2746 # printed anyway.
2752 # printed anyway.
2747 #
2753 #
2748 # Par Msg Comment
2754 # Par Msg Comment
2749 # N N y additional topo root
2755 # N N y additional topo root
2750 #
2756 #
2751 # B N y additional branch root
2757 # B N y additional branch root
2752 # C N y additional topo head
2758 # C N y additional topo head
2753 # H N n usual case
2759 # H N n usual case
2754 #
2760 #
2755 # B B y weird additional branch root
2761 # B B y weird additional branch root
2756 # C B y branch merge
2762 # C B y branch merge
2757 # H B n merge with named branch
2763 # H B n merge with named branch
2758 #
2764 #
2759 # C C y additional head from merge
2765 # C C y additional head from merge
2760 # C H n merge with a head
2766 # C H n merge with a head
2761 #
2767 #
2762 # H H n head merge: head count decreases
2768 # H H n head merge: head count decreases
2763
2769
2764 if not opts.get('close_branch'):
2770 if not opts.get('close_branch'):
2765 for r in parents:
2771 for r in parents:
2766 if r.closesbranch() and r.branch() == branch:
2772 if r.closesbranch() and r.branch() == branch:
2767 repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
2773 repo.ui.status(_('reopening closed branch head %d\n') % r.rev())
2768
2774
2769 if repo.ui.debugflag:
2775 if repo.ui.debugflag:
2770 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
2776 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx.hex()))
2771 elif repo.ui.verbose:
2777 elif repo.ui.verbose:
2772 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2778 repo.ui.write(_('committed changeset %d:%s\n') % (ctx.rev(), ctx))
2773
2779
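# Hedged restatement of the "created new head" rule tabulated above (an
# illustrative approximation; the authoritative check is the condition at the
# top of commitstatus). Parents are assumed changectx-like and bheads a
# collection of branch head nodes; the helper name is hypothetical.
def _sketch_createsnewhead(node, parents, bheads, branch, amending=False):
    if amending or not bheads or node in bheads:
        return False
    # a new head appears when no parent was already a head of this branch
    return not any(p.node() in bheads and p.branch() == branch
                   for p in parents)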
2774 def postcommitstatus(repo, pats, opts):
2780 def postcommitstatus(repo, pats, opts):
2775 return repo.status(match=scmutil.match(repo[None], pats, opts))
2781 return repo.status(match=scmutil.match(repo[None], pats, opts))
2776
2782
2777 def revert(ui, repo, ctx, parents, *pats, **opts):
2783 def revert(ui, repo, ctx, parents, *pats, **opts):
2778 opts = pycompat.byteskwargs(opts)
2784 opts = pycompat.byteskwargs(opts)
2779 parent, p2 = parents
2785 parent, p2 = parents
2780 node = ctx.node()
2786 node = ctx.node()
2781
2787
2782 mf = ctx.manifest()
2788 mf = ctx.manifest()
2783 if node == p2:
2789 if node == p2:
2784 parent = p2
2790 parent = p2
2785
2791
2786 # need all matching names in dirstate and manifest of target rev,
2792 # need all matching names in dirstate and manifest of target rev,
2787 # so have to walk both. do not print errors if files exist in one
2793 # so have to walk both. do not print errors if files exist in one
2788 # but not other. in both cases, filesets should be evaluated against
2794 # but not other. in both cases, filesets should be evaluated against
2789 # workingctx to get consistent result (issue4497). this means 'set:**'
2795 # workingctx to get consistent result (issue4497). this means 'set:**'
2790 # cannot be used to select missing files from target rev.
2796 # cannot be used to select missing files from target rev.
2791
2797
2792 # `names` is a mapping for all elements in working copy and target revision
2798 # `names` is a mapping for all elements in working copy and target revision
2793 # The mapping is in the form:
2799 # The mapping is in the form:
2794 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2800 # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
2795 names = {}
2801 names = {}
2796 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2802 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
2797
2803
2798 with repo.wlock():
2804 with repo.wlock():
2799 ## filling of the `names` mapping
2805 ## filling of the `names` mapping
2800 # walk dirstate to fill `names`
2806 # walk dirstate to fill `names`
2801
2807
2802 interactive = opts.get('interactive', False)
2808 interactive = opts.get('interactive', False)
2803 wctx = repo[None]
2809 wctx = repo[None]
2804 m = scmutil.match(wctx, pats, opts)
2810 m = scmutil.match(wctx, pats, opts)
2805
2811
2806 # we'll need this later
2812 # we'll need this later
2807 targetsubs = sorted(s for s in wctx.substate if m(s))
2813 targetsubs = sorted(s for s in wctx.substate if m(s))
2808
2814
2809 if not m.always():
2815 if not m.always():
2810 matcher = matchmod.badmatch(m, lambda x, y: False)
2816 matcher = matchmod.badmatch(m, lambda x, y: False)
2811 for abs in wctx.walk(matcher):
2817 for abs in wctx.walk(matcher):
2812 names[abs] = m.exact(abs)
2818 names[abs] = m.exact(abs)
2813
2819
2814 # walk target manifest to fill `names`
2820 # walk target manifest to fill `names`
2815
2821
2816 def badfn(path, msg):
2822 def badfn(path, msg):
2817 if path in names:
2823 if path in names:
2818 return
2824 return
2819 if path in ctx.substate:
2825 if path in ctx.substate:
2820 return
2826 return
2821 path_ = path + '/'
2827 path_ = path + '/'
2822 for f in names:
2828 for f in names:
2823 if f.startswith(path_):
2829 if f.startswith(path_):
2824 return
2830 return
2825 ui.warn("%s: %s\n" % (uipathfn(path), msg))
2831 ui.warn("%s: %s\n" % (uipathfn(path), msg))
2826
2832
2827 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2833 for abs in ctx.walk(matchmod.badmatch(m, badfn)):
2828 if abs not in names:
2834 if abs not in names:
2829 names[abs] = m.exact(abs)
2835 names[abs] = m.exact(abs)
2830
2836
2831 # Find the status of all files in `names`.
2837 # Find the status of all files in `names`.
2832 m = scmutil.matchfiles(repo, names)
2838 m = scmutil.matchfiles(repo, names)
2833
2839
2834 changes = repo.status(node1=node, match=m,
2840 changes = repo.status(node1=node, match=m,
2835 unknown=True, ignored=True, clean=True)
2841 unknown=True, ignored=True, clean=True)
2836 else:
2842 else:
2837 changes = repo.status(node1=node, match=m)
2843 changes = repo.status(node1=node, match=m)
2838 for kind in changes:
2844 for kind in changes:
2839 for abs in kind:
2845 for abs in kind:
2840 names[abs] = m.exact(abs)
2846 names[abs] = m.exact(abs)
2841
2847
2842 m = scmutil.matchfiles(repo, names)
2848 m = scmutil.matchfiles(repo, names)
2843
2849
2844 modified = set(changes.modified)
2850 modified = set(changes.modified)
2845 added = set(changes.added)
2851 added = set(changes.added)
2846 removed = set(changes.removed)
2852 removed = set(changes.removed)
2847 _deleted = set(changes.deleted)
2853 _deleted = set(changes.deleted)
2848 unknown = set(changes.unknown)
2854 unknown = set(changes.unknown)
2849 unknown.update(changes.ignored)
2855 unknown.update(changes.ignored)
2850 clean = set(changes.clean)
2856 clean = set(changes.clean)
2851 modadded = set()
2857 modadded = set()
2852
2858
2853 # We need to account for the state of the file in the dirstate,
2859 # We need to account for the state of the file in the dirstate,
2854 # even when we revert against something other than the parent. This will
2860 # even when we revert against something other than the parent. This will
2855 # slightly alter the behavior of revert (making a backup or not, deleting
2861 # slightly alter the behavior of revert (making a backup or not, deleting
2856 # or just forgetting, etc.).
2862 # or just forgetting, etc.).
2857 if parent == node:
2863 if parent == node:
2858 dsmodified = modified
2864 dsmodified = modified
2859 dsadded = added
2865 dsadded = added
2860 dsremoved = removed
2866 dsremoved = removed
2861 # store all local modifications, useful later for rename detection
2867 # store all local modifications, useful later for rename detection
2862 localchanges = dsmodified | dsadded
2868 localchanges = dsmodified | dsadded
2863 modified, added, removed = set(), set(), set()
2869 modified, added, removed = set(), set(), set()
2864 else:
2870 else:
2865 changes = repo.status(node1=parent, match=m)
2871 changes = repo.status(node1=parent, match=m)
2866 dsmodified = set(changes.modified)
2872 dsmodified = set(changes.modified)
2867 dsadded = set(changes.added)
2873 dsadded = set(changes.added)
2868 dsremoved = set(changes.removed)
2874 dsremoved = set(changes.removed)
2869 # store all local modifications, useful later for rename detection
2875 # store all local modifications, useful later for rename detection
2870 localchanges = dsmodified | dsadded
2876 localchanges = dsmodified | dsadded
2871
2877
2872 # only take into account for removes between wc and target
2878 # only take into account for removes between wc and target
2873 clean |= dsremoved - removed
2879 clean |= dsremoved - removed
2874 dsremoved &= removed
2880 dsremoved &= removed
2875 # distinguish between dirstate removals and other removals
2881 # distinguish between dirstate removals and other removals
2876 removed -= dsremoved
2882 removed -= dsremoved
2877
2883
2878 modadded = added & dsmodified
2884 modadded = added & dsmodified
2879 added -= modadded
2885 added -= modadded
2880
2886
2881 # tell newly modified files apart.
2887 # tell newly modified files apart.
2882 dsmodified &= modified
2888 dsmodified &= modified
2883 dsmodified |= modified & dsadded # dirstate added may need backup
2889 dsmodified |= modified & dsadded # dirstate added may need backup
2884 modified -= dsmodified
2890 modified -= dsmodified
2885
2891
2886 # We need to wait for some post-processing to update this set
2892 # We need to wait for some post-processing to update this set
2887 # before making the distinction. The dirstate will be used for
2893 # before making the distinction. The dirstate will be used for
2888 # that purpose.
2894 # that purpose.
2889 dsadded = added
2895 dsadded = added
2890
2896
2891 # in case of merge, files that are actually added can be reported as
2897 # in case of merge, files that are actually added can be reported as
2892 # modified, we need to post process the result
2898 # modified, we need to post process the result
2893 if p2 != nullid:
2899 if p2 != nullid:
2894 mergeadd = set(dsmodified)
2900 mergeadd = set(dsmodified)
2895 for path in dsmodified:
2901 for path in dsmodified:
2896 if path in mf:
2902 if path in mf:
2897 mergeadd.remove(path)
2903 mergeadd.remove(path)
2898 dsadded |= mergeadd
2904 dsadded |= mergeadd
2899 dsmodified -= mergeadd
2905 dsmodified -= mergeadd
2900
2906
2901 # if f is a rename, update `names` to also revert the source
2907 # if f is a rename, update `names` to also revert the source
2902 for f in localchanges:
2908 for f in localchanges:
2903 src = repo.dirstate.copied(f)
2909 src = repo.dirstate.copied(f)
2904 # XXX should we check for rename down to target node?
2910 # XXX should we check for rename down to target node?
2905 if src and src not in names and repo.dirstate[src] == 'r':
2911 if src and src not in names and repo.dirstate[src] == 'r':
2906 dsremoved.add(src)
2912 dsremoved.add(src)
2907 names[src] = True
2913 names[src] = True
2908
2914
2909 # determine the exact nature of the deleted files
2915 # determine the exact nature of the deleted files
2910 deladded = set(_deleted)
2916 deladded = set(_deleted)
2911 for path in _deleted:
2917 for path in _deleted:
2912 if path in mf:
2918 if path in mf:
2913 deladded.remove(path)
2919 deladded.remove(path)
2914 deleted = _deleted - deladded
2920 deleted = _deleted - deladded
2915
2921
2916 # distinguish between files to forget and the others
2922 # distinguish between files to forget and the others
2917 added = set()
2923 added = set()
2918 for abs in dsadded:
2924 for abs in dsadded:
2919 if repo.dirstate[abs] != 'a':
2925 if repo.dirstate[abs] != 'a':
2920 added.add(abs)
2926 added.add(abs)
2921 dsadded -= added
2927 dsadded -= added
2922
2928
2923 for abs in deladded:
2929 for abs in deladded:
2924 if repo.dirstate[abs] == 'a':
2930 if repo.dirstate[abs] == 'a':
2925 dsadded.add(abs)
2931 dsadded.add(abs)
2926 deladded -= dsadded
2932 deladded -= dsadded
2927
2933
2928 # For files marked as removed, we check if an unknown file is present at
2934 # For files marked as removed, we check if an unknown file is present at
2929 # the same path. If such a file exists it may need to be backed up.
2935 # the same path. If such a file exists it may need to be backed up.
2930 # Making the distinction at this stage helps have simpler backup
2936 # Making the distinction at this stage helps have simpler backup
2931 # logic.
2937 # logic.
2932 removunk = set()
2938 removunk = set()
2933 for abs in removed:
2939 for abs in removed:
2934 target = repo.wjoin(abs)
2940 target = repo.wjoin(abs)
2935 if os.path.lexists(target):
2941 if os.path.lexists(target):
2936 removunk.add(abs)
2942 removunk.add(abs)
2937 removed -= removunk
2943 removed -= removunk
2938
2944
2939 dsremovunk = set()
2945 dsremovunk = set()
2940 for abs in dsremoved:
2946 for abs in dsremoved:
2941 target = repo.wjoin(abs)
2947 target = repo.wjoin(abs)
2942 if os.path.lexists(target):
2948 if os.path.lexists(target):
2943 dsremovunk.add(abs)
2949 dsremovunk.add(abs)
2944 dsremoved -= dsremovunk
2950 dsremoved -= dsremovunk
2945
2951
2946 # actions to be actually performed by revert
2952 # actions to be actually performed by revert
2947 # (<list of files>, <message>) tuple
2953 # (<list of files>, <message>) tuple
2948 actions = {'revert': ([], _('reverting %s\n')),
2954 actions = {'revert': ([], _('reverting %s\n')),
2949 'add': ([], _('adding %s\n')),
2955 'add': ([], _('adding %s\n')),
2950 'remove': ([], _('removing %s\n')),
2956 'remove': ([], _('removing %s\n')),
2951 'drop': ([], _('removing %s\n')),
2957 'drop': ([], _('removing %s\n')),
2952 'forget': ([], _('forgetting %s\n')),
2958 'forget': ([], _('forgetting %s\n')),
2953 'undelete': ([], _('undeleting %s\n')),
2959 'undelete': ([], _('undeleting %s\n')),
2954 'noop': (None, _('no changes needed to %s\n')),
2960 'noop': (None, _('no changes needed to %s\n')),
2955 'unknown': (None, _('file not managed: %s\n')),
2961 'unknown': (None, _('file not managed: %s\n')),
2956 }
2962 }
2957
2963
2958 # "constant" that convey the backup strategy.
2964 # "constant" that convey the backup strategy.
2959 # All set to `discard` if `no-backup` is set do avoid checking
2965 # All set to `discard` if `no-backup` is set do avoid checking
2960 # no_backup lower in the code.
2966 # no_backup lower in the code.
2961 # These values are ordered for comparison purposes
2967 # These values are ordered for comparison purposes
2962 backupinteractive = 3 # do backup if interactively modified
2968 backupinteractive = 3 # do backup if interactively modified
2963 backup = 2 # unconditionally do backup
2969 backup = 2 # unconditionally do backup
2964 check = 1 # check if the existing file differs from target
2970 check = 1 # check if the existing file differs from target
2965 discard = 0 # never do backup
2971 discard = 0 # never do backup
2966 if opts.get('no_backup'):
2972 if opts.get('no_backup'):
2967 backupinteractive = backup = check = discard
2973 backupinteractive = backup = check = discard
2968 if interactive:
2974 if interactive:
2969 dsmodifiedbackup = backupinteractive
2975 dsmodifiedbackup = backupinteractive
2970 else:
2976 else:
2971 dsmodifiedbackup = backup
2977 dsmodifiedbackup = backup
2972 tobackup = set()
2978 tobackup = set()
2973
2979
2974 backupanddel = actions['remove']
2980 backupanddel = actions['remove']
2975 if not opts.get('no_backup'):
2981 if not opts.get('no_backup'):
2976 backupanddel = actions['drop']
2982 backupanddel = actions['drop']
2977
2983
2978 disptable = (
2984 disptable = (
2979 # dispatch table:
2985 # dispatch table:
2980 # file state
2986 # file state
2981 # action
2987 # action
2982 # make backup
2988 # make backup
2983
2989
2984 ## Sets that will result in file changes on disk
2990 ## Sets that will result in file changes on disk
2985 # Modified compared to target, no local change
2991 # Modified compared to target, no local change
2986 (modified, actions['revert'], discard),
2992 (modified, actions['revert'], discard),
2987 # Modified compared to target, but local file is deleted
2993 # Modified compared to target, but local file is deleted
2988 (deleted, actions['revert'], discard),
2994 (deleted, actions['revert'], discard),
2989 # Modified compared to target, local change
2995 # Modified compared to target, local change
2990 (dsmodified, actions['revert'], dsmodifiedbackup),
2996 (dsmodified, actions['revert'], dsmodifiedbackup),
2991 # Added since target
2997 # Added since target
2992 (added, actions['remove'], discard),
2998 (added, actions['remove'], discard),
2993 # Added in working directory
2999 # Added in working directory
2994 (dsadded, actions['forget'], discard),
3000 (dsadded, actions['forget'], discard),
2995 # Added since target, have local modification
3001 # Added since target, have local modification
2996 (modadded, backupanddel, backup),
3002 (modadded, backupanddel, backup),
2997 # Added since target but file is missing in working directory
3003 # Added since target but file is missing in working directory
2998 (deladded, actions['drop'], discard),
3004 (deladded, actions['drop'], discard),
2999 # Removed since target, before working copy parent
3005 # Removed since target, before working copy parent
3000 (removed, actions['add'], discard),
3006 (removed, actions['add'], discard),
3001 # Same as `removed` but an unknown file exists at the same path
3007 # Same as `removed` but an unknown file exists at the same path
3002 (removunk, actions['add'], check),
3008 (removunk, actions['add'], check),
3003 # Removed since target, marked as such in working copy parent
3009 # Removed since target, marked as such in working copy parent
3004 (dsremoved, actions['undelete'], discard),
3010 (dsremoved, actions['undelete'], discard),
3005 # Same as `dsremoved` but an unknown file exists at the same path
3011 # Same as `dsremoved` but an unknown file exists at the same path
3006 (dsremovunk, actions['undelete'], check),
3012 (dsremovunk, actions['undelete'], check),
3007 ## the following sets do not result in any file changes
3013 ## the following sets do not result in any file changes
3008 # File with no modification
3014 # File with no modification
3009 (clean, actions['noop'], discard),
3015 (clean, actions['noop'], discard),
3010 # Existing file, not tracked anywhere
3016 # Existing file, not tracked anywhere
3011 (unknown, actions['unknown'], discard),
3017 (unknown, actions['unknown'], discard),
3012 )
3018 )
3013
3019
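# Worked example of reading the table above: a file that differs from the
# target revision and also carries local (dirstate) changes lands in
# dsmodified, so the loop below queues it for 'revert' and backs it up, or,
# when interactive, only marks it for backup if the user actually selects
# changes in it (backupinteractive, issue4793).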
3014 for abs, exact in sorted(names.items()):
3020 for abs, exact in sorted(names.items()):
3015 # target file to be touched on disk (relative to cwd)
3021 # target file to be touched on disk (relative to cwd)
3016 target = repo.wjoin(abs)
3022 target = repo.wjoin(abs)
3017 # search the entry in the dispatch table.
3023 # search the entry in the dispatch table.
3018 # if the file is in any of these sets, it was touched in the working
3024 # if the file is in any of these sets, it was touched in the working
3019 # directory parent and we are sure it needs to be reverted.
3025 # directory parent and we are sure it needs to be reverted.
3020 for table, (xlist, msg), dobackup in disptable:
3026 for table, (xlist, msg), dobackup in disptable:
3021 if abs not in table:
3027 if abs not in table:
3022 continue
3028 continue
3023 if xlist is not None:
3029 if xlist is not None:
3024 xlist.append(abs)
3030 xlist.append(abs)
3025 if dobackup:
3031 if dobackup:
3026 # If in interactive mode, don't automatically create
3032 # If in interactive mode, don't automatically create
3027 # .orig files (issue4793)
3033 # .orig files (issue4793)
3028 if dobackup == backupinteractive:
3034 if dobackup == backupinteractive:
3029 tobackup.add(abs)
3035 tobackup.add(abs)
3030 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3036 elif (backup <= dobackup or wctx[abs].cmp(ctx[abs])):
3031 absbakname = scmutil.backuppath(ui, repo, abs)
3037 absbakname = scmutil.backuppath(ui, repo, abs)
3032 bakname = os.path.relpath(absbakname,
3038 bakname = os.path.relpath(absbakname,
3033 start=repo.root)
3039 start=repo.root)
3034 ui.note(_('saving current version of %s as %s\n') %
3040 ui.note(_('saving current version of %s as %s\n') %
3035 (uipathfn(abs), uipathfn(bakname)))
3041 (uipathfn(abs), uipathfn(bakname)))
3036 if not opts.get('dry_run'):
3042 if not opts.get('dry_run'):
3037 if interactive:
3043 if interactive:
3038 util.copyfile(target, absbakname)
3044 util.copyfile(target, absbakname)
3039 else:
3045 else:
3040 util.rename(target, absbakname)
3046 util.rename(target, absbakname)
3041 if opts.get('dry_run'):
3047 if opts.get('dry_run'):
3042 if ui.verbose or not exact:
3048 if ui.verbose or not exact:
3043 ui.status(msg % uipathfn(abs))
3049 ui.status(msg % uipathfn(abs))
3044 elif exact:
3050 elif exact:
3045 ui.warn(msg % uipathfn(abs))
3051 ui.warn(msg % uipathfn(abs))
3046 break
3052 break
3047
3053
3048 if not opts.get('dry_run'):
3054 if not opts.get('dry_run'):
3049 needdata = ('revert', 'add', 'undelete')
3055 needdata = ('revert', 'add', 'undelete')
3050 oplist = [actions[name][0] for name in needdata]
3056 oplist = [actions[name][0] for name in needdata]
3051 prefetch = scmutil.prefetchfiles
3057 prefetch = scmutil.prefetchfiles
3052 matchfiles = scmutil.matchfiles
3058 matchfiles = scmutil.matchfiles
3053 prefetch(repo, [ctx.rev()],
3059 prefetch(repo, [ctx.rev()],
3054 matchfiles(repo,
3060 matchfiles(repo,
3055 [f for sublist in oplist for f in sublist]))
3061 [f for sublist in oplist for f in sublist]))
3056 match = scmutil.match(repo[None], pats)
3062 match = scmutil.match(repo[None], pats)
3057 _performrevert(repo, parents, ctx, names, uipathfn, actions,
3063 _performrevert(repo, parents, ctx, names, uipathfn, actions,
3058 match, interactive, tobackup)
3064 match, interactive, tobackup)
3059
3065
3060 if targetsubs:
3066 if targetsubs:
3061 # Revert the subrepos on the revert list
3067 # Revert the subrepos on the revert list
3062 for sub in targetsubs:
3068 for sub in targetsubs:
3063 try:
3069 try:
3064 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3070 wctx.sub(sub).revert(ctx.substate[sub], *pats,
3065 **pycompat.strkwargs(opts))
3071 **pycompat.strkwargs(opts))
3066 except KeyError:
3072 except KeyError:
3067 raise error.Abort("subrepository '%s' does not exist in %s!"
3073 raise error.Abort("subrepository '%s' does not exist in %s!"
3068 % (sub, short(ctx.node())))
3074 % (sub, short(ctx.node())))
3069
3075
3070 def _performrevert(repo, parents, ctx, names, uipathfn, actions,
3076 def _performrevert(repo, parents, ctx, names, uipathfn, actions,
3071 match, interactive=False, tobackup=None):
3077 match, interactive=False, tobackup=None):
3072 """function that actually perform all the actions computed for revert
3078 """function that actually perform all the actions computed for revert
3073
3079
3074 This is an independent function to let extension to plug in and react to
3080 This is an independent function to let extension to plug in and react to
3075 the imminent revert.
3081 the imminent revert.
3076
3082
3077 Make sure you have the working directory locked when calling this function.
3083 Make sure you have the working directory locked when calling this function.
3078 """
3084 """
3079 parent, p2 = parents
3085 parent, p2 = parents
3080 node = ctx.node()
3086 node = ctx.node()
3081 excluded_files = []
3087 excluded_files = []
3082
3088
3083 def checkout(f):
3089 def checkout(f):
3084 fc = ctx[f]
3090 fc = ctx[f]
3085 repo.wwrite(f, fc.data(), fc.flags())
3091 repo.wwrite(f, fc.data(), fc.flags())
3086
3092
3087 def doremove(f):
3093 def doremove(f):
3088 try:
3094 try:
3089 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
3095 rmdir = repo.ui.configbool('experimental', 'removeemptydirs')
3090 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3096 repo.wvfs.unlinkpath(f, rmdir=rmdir)
3091 except OSError:
3097 except OSError:
3092 pass
3098 pass
3093 repo.dirstate.remove(f)
3099 repo.dirstate.remove(f)
3094
3100
3095 def prntstatusmsg(action, f):
3101 def prntstatusmsg(action, f):
3096 exact = names[f]
3102 exact = names[f]
3097 if repo.ui.verbose or not exact:
3103 if repo.ui.verbose or not exact:
3098 repo.ui.status(actions[action][1] % uipathfn(f))
3104 repo.ui.status(actions[action][1] % uipathfn(f))
3099
3105
3100 audit_path = pathutil.pathauditor(repo.root, cached=True)
3106 audit_path = pathutil.pathauditor(repo.root, cached=True)
3101 for f in actions['forget'][0]:
3107 for f in actions['forget'][0]:
3102 if interactive:
3108 if interactive:
3103 choice = repo.ui.promptchoice(
3109 choice = repo.ui.promptchoice(
3104 _("forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3110 _("forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3105 if choice == 0:
3111 if choice == 0:
3106 prntstatusmsg('forget', f)
3112 prntstatusmsg('forget', f)
3107 repo.dirstate.drop(f)
3113 repo.dirstate.drop(f)
3108 else:
3114 else:
3109 excluded_files.append(f)
3115 excluded_files.append(f)
3110 else:
3116 else:
3111 prntstatusmsg('forget', f)
3117 prntstatusmsg('forget', f)
3112 repo.dirstate.drop(f)
3118 repo.dirstate.drop(f)
3113 for f in actions['remove'][0]:
3119 for f in actions['remove'][0]:
3114 audit_path(f)
3120 audit_path(f)
3115 if interactive:
3121 if interactive:
3116 choice = repo.ui.promptchoice(
3122 choice = repo.ui.promptchoice(
3117 _("remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3123 _("remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f))
3118 if choice == 0:
3124 if choice == 0:
3119 prntstatusmsg('remove', f)
3125 prntstatusmsg('remove', f)
3120 doremove(f)
3126 doremove(f)
3121 else:
3127 else:
3122 excluded_files.append(f)
3128 excluded_files.append(f)
3123 else:
3129 else:
3124 prntstatusmsg('remove', f)
3130 prntstatusmsg('remove', f)
3125 doremove(f)
3131 doremove(f)
3126 for f in actions['drop'][0]:
3132 for f in actions['drop'][0]:
3127 audit_path(f)
3133 audit_path(f)
3128 prntstatusmsg('drop', f)
3134 prntstatusmsg('drop', f)
3129 repo.dirstate.remove(f)
3135 repo.dirstate.remove(f)
3130
3136
3131 normal = None
3137 normal = None
3132 if node == parent:
3138 if node == parent:
3133 # We're reverting to our parent. If possible, we'd like status
3139 # We're reverting to our parent. If possible, we'd like status
3134 # to report the file as clean. We have to use normallookup for
3140 # to report the file as clean. We have to use normallookup for
3135 # merges to avoid losing information about merged/dirty files.
3141 # merges to avoid losing information about merged/dirty files.
3136 if p2 != nullid:
3142 if p2 != nullid:
3137 normal = repo.dirstate.normallookup
3143 normal = repo.dirstate.normallookup
3138 else:
3144 else:
3139 normal = repo.dirstate.normal
3145 normal = repo.dirstate.normal
3140
3146
3141 newlyaddedandmodifiedfiles = set()
3147 newlyaddedandmodifiedfiles = set()
3142 if interactive:
3148 if interactive:
3143 # Prompt the user for changes to revert
3149 # Prompt the user for changes to revert
3144 torevert = [f for f in actions['revert'][0] if f not in excluded_files]
3150 torevert = [f for f in actions['revert'][0] if f not in excluded_files]
3145 m = scmutil.matchfiles(repo, torevert)
3151 m = scmutil.matchfiles(repo, torevert)
3146 diffopts = patch.difffeatureopts(repo.ui, whitespace=True,
3152 diffopts = patch.difffeatureopts(repo.ui, whitespace=True,
3147 section='commands',
3153 section='commands',
3148 configprefix='revert.interactive.')
3154 configprefix='revert.interactive.')
3149 diffopts.nodates = True
3155 diffopts.nodates = True
3150 diffopts.git = True
3156 diffopts.git = True
3151 operation = 'apply'
3157 operation = 'apply'
3152 if node == parent:
3158 if node == parent:
3153 if repo.ui.configbool('experimental',
3159 if repo.ui.configbool('experimental',
3154 'revert.interactive.select-to-keep'):
3160 'revert.interactive.select-to-keep'):
3155 operation = 'keep'
3161 operation = 'keep'
3156 else:
3162 else:
3157 operation = 'discard'
3163 operation = 'discard'
3158
3164
3159 if operation == 'apply':
3165 if operation == 'apply':
3160 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3166 diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
3161 else:
3167 else:
3162 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3168 diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
3163 originalchunks = patch.parsepatch(diff)
3169 originalchunks = patch.parsepatch(diff)
3164
3170
3165 try:
3171 try:
3166
3172
3167 chunks, opts = recordfilter(repo.ui, originalchunks, match,
3173 chunks, opts = recordfilter(repo.ui, originalchunks, match,
3168 operation=operation)
3174 operation=operation)
3169 if operation == 'discard':
3175 if operation == 'discard':
3170 chunks = patch.reversehunks(chunks)
3176 chunks = patch.reversehunks(chunks)
3171
3177
3172 except error.PatchError as err:
3178 except error.PatchError as err:
3173 raise error.Abort(_('error parsing patch: %s') % err)
3179 raise error.Abort(_('error parsing patch: %s') % err)
3174
3180
3175 newlyaddedandmodifiedfiles = newandmodified(chunks, originalchunks)
3181 # FIXME: when doing an interactive revert of a copy, there's no way of
3182 # performing a partial revert of the added file, the only option is
3183 # "remove added file <name> (Yn)?", so we don't need to worry about the
3184 # alsorestore value. Ideally we'd be able to partially revert
3185 # copied/renamed files.
3186 newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
3187 chunks, originalchunks)
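# Note on the change above (hedged, inferred from this hunk alone):
# newandmodified now returns a pair, and the second value is presumably the
# set of files that would also need restoring when a rename/copy is only
# partially committed (issue5723). Interactive revert can ignore it, hence
# the throwaway "unusedalsorestore" name.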
3176 if tobackup is None:
3188 if tobackup is None:
3177 tobackup = set()
3189 tobackup = set()
3178 # Apply changes
3190 # Apply changes
3179 fp = stringio()
3191 fp = stringio()
3180 # chunks are serialized per file, but files aren't sorted
3192 # chunks are serialized per file, but files aren't sorted
3181 for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
3193 for f in sorted(set(c.header.filename() for c in chunks if ishunk(c))):
3182 prntstatusmsg('revert', f)
3194 prntstatusmsg('revert', f)
3183 files = set()
3195 files = set()
3184 for c in chunks:
3196 for c in chunks:
3185 if ishunk(c):
3197 if ishunk(c):
3186 abs = c.header.filename()
3198 abs = c.header.filename()
3187 # Create a backup file only if this hunk should be backed up
3199 # Create a backup file only if this hunk should be backed up
3188 if c.header.filename() in tobackup:
3200 if c.header.filename() in tobackup:
3189 target = repo.wjoin(abs)
3201 target = repo.wjoin(abs)
3190 bakname = scmutil.backuppath(repo.ui, repo, abs)
3202 bakname = scmutil.backuppath(repo.ui, repo, abs)
3191 util.copyfile(target, bakname)
3203 util.copyfile(target, bakname)
3192 tobackup.remove(abs)
3204 tobackup.remove(abs)
3193 if abs not in files:
3205 if abs not in files:
3194 files.add(abs)
3206 files.add(abs)
3195 if operation == 'keep':
3207 if operation == 'keep':
3196 checkout(abs)
3208 checkout(abs)
3197 c.write(fp)
3209 c.write(fp)
3198 dopatch = fp.tell()
3210 dopatch = fp.tell()
3199 fp.seek(0)
3211 fp.seek(0)
3200 if dopatch:
3212 if dopatch:
3201 try:
3213 try:
3202 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3214 patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
3203 except error.PatchError as err:
3215 except error.PatchError as err:
3204 raise error.Abort(pycompat.bytestr(err))
3216 raise error.Abort(pycompat.bytestr(err))
3205 del fp
3217 del fp
3206 else:
3218 else:
3207 for f in actions['revert'][0]:
3219 for f in actions['revert'][0]:
3208 prntstatusmsg('revert', f)
3220 prntstatusmsg('revert', f)
3209 checkout(f)
3221 checkout(f)
3210 if normal:
3222 if normal:
3211 normal(f)
3223 normal(f)
3212
3224
3213 for f in actions['add'][0]:
3225 for f in actions['add'][0]:
3214 # Don't checkout modified files, they are already created by the diff
3226 # Don't checkout modified files, they are already created by the diff
3215 if f not in newlyaddedandmodifiedfiles:
3227 if f not in newlyaddedandmodifiedfiles:
3216 prntstatusmsg('add', f)
3228 prntstatusmsg('add', f)
3217 checkout(f)
3229 checkout(f)
3218 repo.dirstate.add(f)
3230 repo.dirstate.add(f)
3219
3231
3220 normal = repo.dirstate.normallookup
3232 normal = repo.dirstate.normallookup
3221 if node == parent and p2 == nullid:
3233 if node == parent and p2 == nullid:
3222 normal = repo.dirstate.normal
3234 normal = repo.dirstate.normal
3223 for f in actions['undelete'][0]:
3235 for f in actions['undelete'][0]:
3224 if interactive:
3236 if interactive:
3225 choice = repo.ui.promptchoice(
3237 choice = repo.ui.promptchoice(
3226 _("add back removed file %s (Yn)?$$ &Yes $$ &No") % f)
3238 _("add back removed file %s (Yn)?$$ &Yes $$ &No") % f)
3227 if choice == 0:
3239 if choice == 0:
3228 prntstatusmsg('undelete', f)
3240 prntstatusmsg('undelete', f)
3229 checkout(f)
3241 checkout(f)
3230 normal(f)
3242 normal(f)
3231 else:
3243 else:
3232 excluded_files.append(f)
3244 excluded_files.append(f)
3233 else:
3245 else:
3234 prntstatusmsg('undelete', f)
3246 prntstatusmsg('undelete', f)
3235 checkout(f)
3247 checkout(f)
3236 normal(f)
3248 normal(f)
3237
3249
3238 copied = copies.pathcopies(repo[parent], ctx)
3250 copied = copies.pathcopies(repo[parent], ctx)
3239
3251
3240 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3252 for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
3241 if f in copied:
3253 if f in copied:
3242 repo.dirstate.copy(copied[f], f)
3254 repo.dirstate.copy(copied[f], f)
3243
3255
3244 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3256 # a list of (ui, repo, otherpeer, opts, missing) functions called by
3245 # commands.outgoing. "missing" is the "missing" attribute of the result of
3257 # commands.outgoing. "missing" is the "missing" attribute of the result of
3246 # "findcommonoutgoing()"
3258 # "findcommonoutgoing()"
3247 outgoinghooks = util.hooks()
3259 outgoinghooks = util.hooks()
3248
3260
3249 # a list of (ui, repo) functions called by commands.summary
3261 # a list of (ui, repo) functions called by commands.summary
3250 summaryhooks = util.hooks()
3262 summaryhooks = util.hooks()
3251
3263
3252 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3264 # a list of (ui, repo, opts, changes) functions called by commands.summary.
3253 #
3265 #
3254 # functions should return tuple of booleans below, if 'changes' is None:
3266 # functions should return tuple of booleans below, if 'changes' is None:
3255 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3267 # (whether-incomings-are-needed, whether-outgoings-are-needed)
3256 #
3268 #
3257 # otherwise, 'changes' is a tuple of tuples below:
3269 # otherwise, 'changes' is a tuple of tuples below:
3258 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3270 # - (sourceurl, sourcebranch, sourcepeer, incoming)
3259 # - (desturl, destbranch, destpeer, outgoing)
3271 # - (desturl, destbranch, destpeer, outgoing)
3260 summaryremotehooks = util.hooks()
3272 summaryremotehooks = util.hooks()
3261
3273
3262
3274
3263 def checkunfinished(repo, commit=False, skipmerge=False):
3275 def checkunfinished(repo, commit=False, skipmerge=False):
3264 '''Look for an unfinished multistep operation, like graft, and abort
3276 '''Look for an unfinished multistep operation, like graft, and abort
3265 if found. It's probably good to check this right before
3277 if found. It's probably good to check this right before
3266 bailifchanged().
3278 bailifchanged().
3267 '''
3279 '''
3268 # Check for non-clearable states first, so things like rebase will take
3280 # Check for non-clearable states first, so things like rebase will take
3269 # precedence over update.
3281 # precedence over update.
3270 for state in statemod._unfinishedstates:
3282 for state in statemod._unfinishedstates:
3271 if (state._clearable or (commit and state._allowcommit) or
3283 if (state._clearable or (commit and state._allowcommit) or
3272 state._reportonly):
3284 state._reportonly):
3273 continue
3285 continue
3274 if state.isunfinished(repo):
3286 if state.isunfinished(repo):
3275 raise error.Abort(state.msg(), hint=state.hint())
3287 raise error.Abort(state.msg(), hint=state.hint())
3276
3288
3277 for s in statemod._unfinishedstates:
3289 for s in statemod._unfinishedstates:
3278 if (not s._clearable or (commit and s._allowcommit) or
3290 if (not s._clearable or (commit and s._allowcommit) or
3279 (s._opname == 'merge' and skipmerge) or s._reportonly):
3291 (s._opname == 'merge' and skipmerge) or s._reportonly):
3280 continue
3292 continue
3281 if s.isunfinished(repo):
3293 if s.isunfinished(repo):
3282 raise error.Abort(s.msg(), hint=s.hint())
3294 raise error.Abort(s.msg(), hint=s.hint())
3283
3295
3284 def clearunfinished(repo):
3296 def clearunfinished(repo):
3285 '''Check for unfinished operations (as above), and clear the ones
3297 '''Check for unfinished operations (as above), and clear the ones
3286 that are clearable.
3298 that are clearable.
3287 '''
3299 '''
3288 for state in statemod._unfinishedstates:
3300 for state in statemod._unfinishedstates:
3289 if state._reportonly:
3301 if state._reportonly:
3290 continue
3302 continue
3291 if not state._clearable and state.isunfinished(repo):
3303 if not state._clearable and state.isunfinished(repo):
3292 raise error.Abort(state.msg(), hint=state.hint())
3304 raise error.Abort(state.msg(), hint=state.hint())
3293
3305
3294 for s in statemod._unfinishedstates:
3306 for s in statemod._unfinishedstates:
3295 if s._opname == 'merge' or s._reportonly:
3307 if s._opname == 'merge' or s._reportonly:
3296 continue
3308 continue
3297 if s._clearable and s.isunfinished(repo):
3309 if s._clearable and s.isunfinished(repo):
3298 util.unlink(repo.vfs.join(s._fname))
3310 util.unlink(repo.vfs.join(s._fname))
3299
3311
3300 def getunfinishedstate(repo):
3312 def getunfinishedstate(repo):
3301 ''' Check for unfinished operations and return the statecheck object
3313 ''' Check for unfinished operations and return the statecheck object
3302 for it'''
3314 for it'''
3303 for state in statemod._unfinishedstates:
3315 for state in statemod._unfinishedstates:
3304 if state.isunfinished(repo):
3316 if state.isunfinished(repo):
3305 return state
3317 return state
3306 return None
3318 return None
3307
3319
3308 def howtocontinue(repo):
3320 def howtocontinue(repo):
3309 '''Check for an unfinished operation and return the command to finish
3321 '''Check for an unfinished operation and return the command to finish
3310 it.
3322 it.
3311
3323
3312 statemod._unfinishedstates list is checked for an unfinished operation
3324 statemod._unfinishedstates list is checked for an unfinished operation
3313 and the corresponding message to finish it is generated if a method to
3325 and the corresponding message to finish it is generated if a method to
3314 continue is supported by the operation.
3326 continue is supported by the operation.
3315
3327
3316 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3328 Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
3317 a boolean.
3329 a boolean.
3318 '''
3330 '''
3319 contmsg = _("continue: %s")
3331 contmsg = _("continue: %s")
3320 for state in statemod._unfinishedstates:
3332 for state in statemod._unfinishedstates:
3321 if not state._continueflag:
3333 if not state._continueflag:
3322 continue
3334 continue
3323 if state.isunfinished(repo):
3335 if state.isunfinished(repo):
3324 return contmsg % state.continuemsg(), True
3336 return contmsg % state.continuemsg(), True
3325 if repo[None].dirty(missing=True, merge=False, branch=False):
3337 if repo[None].dirty(missing=True, merge=False, branch=False):
3326 return contmsg % _("hg commit"), False
3338 return contmsg % _("hg commit"), False
3327 return None, None
3339 return None, None
3328
3340
3329 def checkafterresolved(repo):
3341 def checkafterresolved(repo):
3330 '''Inform the user about the next action after completing hg resolve
3342 '''Inform the user about the next action after completing hg resolve
3331
3343
3332 If there's an unfinished operation that supports the continue flag,
3344 If there's an unfinished operation that supports the continue flag,
3333 howtocontinue will yield repo.ui.warn as the reporter.
3345 howtocontinue will yield repo.ui.warn as the reporter.
3334
3346
3335 Otherwise, it will yield repo.ui.note.
3347 Otherwise, it will yield repo.ui.note.
3336 '''
3348 '''
3337 msg, warning = howtocontinue(repo)
3349 msg, warning = howtocontinue(repo)
3338 if msg is not None:
3350 if msg is not None:
3339 if warning:
3351 if warning:
3340 repo.ui.warn("%s\n" % msg)
3352 repo.ui.warn("%s\n" % msg)
3341 else:
3353 else:
3342 repo.ui.note("%s\n" % msg)
3354 repo.ui.note("%s\n" % msg)
3343
3355
3344 def wrongtooltocontinue(repo, task):
3356 def wrongtooltocontinue(repo, task):
3345 '''Raise an abort suggesting how to properly continue if there is an
3357 '''Raise an abort suggesting how to properly continue if there is an
3346 active task.
3358 active task.
3347
3359
3348 Uses howtocontinue() to find the active task.
3360 Uses howtocontinue() to find the active task.
3349
3361
3350 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3362 If there's no task (repo.ui.note for 'hg commit'), it does not offer
3351 a hint.
3363 a hint.
3352 '''
3364 '''
3353 after = howtocontinue(repo)
3365 after = howtocontinue(repo)
3354 hint = None
3366 hint = None
3355 if after[1]:
3367 if after[1]:
3356 hint = after[0]
3368 hint = after[0]
3357 raise error.Abort(_('no %s in progress') % task, hint=hint)
3369 raise error.Abort(_('no %s in progress') % task, hint=hint)
3358
3370
3359 def abortgraft(ui, repo, graftstate):
3371 def abortgraft(ui, repo, graftstate):
3360 """abort the interrupted graft and rollbacks to the state before interrupted
3372 """abort the interrupted graft and rollbacks to the state before interrupted
3361 graft"""
3373 graft"""
3362 if not graftstate.exists():
3374 if not graftstate.exists():
3363 raise error.Abort(_("no interrupted graft to abort"))
3375 raise error.Abort(_("no interrupted graft to abort"))
3364 statedata = readgraftstate(repo, graftstate)
3376 statedata = readgraftstate(repo, graftstate)
3365 newnodes = statedata.get('newnodes')
3377 newnodes = statedata.get('newnodes')
3366 if newnodes is None:
3378 if newnodes is None:
3367 # an old graft state which does not have all the data required to abort
3379 # an old graft state which does not have all the data required to abort
3368 # the graft
3380 # the graft
3369 raise error.Abort(_("cannot abort using an old graftstate"))
3381 raise error.Abort(_("cannot abort using an old graftstate"))
3370
3382
3371 # changeset from which graft operation was started
3383 # changeset from which graft operation was started
3372 if len(newnodes) > 0:
3384 if len(newnodes) > 0:
3373 startctx = repo[newnodes[0]].p1()
3385 startctx = repo[newnodes[0]].p1()
3374 else:
3386 else:
3375 startctx = repo['.']
3387 startctx = repo['.']
3376 # whether to strip or not
3388 # whether to strip or not
3377 cleanup = False
3389 cleanup = False
3378 from . import hg
3390 from . import hg
3379 if newnodes:
3391 if newnodes:
3380 newnodes = [repo[r].rev() for r in newnodes]
3392 newnodes = [repo[r].rev() for r in newnodes]
3381 cleanup = True
3393 cleanup = True
3382 # checking that none of the newnodes turned public or is public
3394 # checking that none of the newnodes turned public or is public
3383 immutable = [c for c in newnodes if not repo[c].mutable()]
3395 immutable = [c for c in newnodes if not repo[c].mutable()]
3384 if immutable:
3396 if immutable:
3385 repo.ui.warn(_("cannot clean up public changesets %s\n")
3397 repo.ui.warn(_("cannot clean up public changesets %s\n")
3386 % ', '.join(bytes(repo[r]) for r in immutable),
3398 % ', '.join(bytes(repo[r]) for r in immutable),
3387 hint=_("see 'hg help phases' for details"))
3399 hint=_("see 'hg help phases' for details"))
3388 cleanup = False
3400 cleanup = False
3389
3401
3390 # checking that no new nodes are created on top of grafted revs
3402 # checking that no new nodes are created on top of grafted revs
3391 desc = set(repo.changelog.descendants(newnodes))
3403 desc = set(repo.changelog.descendants(newnodes))
3392 if desc - set(newnodes):
3404 if desc - set(newnodes):
3393 repo.ui.warn(_("new changesets detected on destination "
3405 repo.ui.warn(_("new changesets detected on destination "
3394 "branch, can't strip\n"))
3406 "branch, can't strip\n"))
3395 cleanup = False
3407 cleanup = False
3396
3408
3397 if cleanup:
3409 if cleanup:
3398 with repo.wlock(), repo.lock():
3410 with repo.wlock(), repo.lock():
3399 hg.updaterepo(repo, startctx.node(), overwrite=True)
3411 hg.updaterepo(repo, startctx.node(), overwrite=True)
3400 # stripping the new nodes created
3412 # stripping the new nodes created
3401 strippoints = [c.node() for c in repo.set("roots(%ld)",
3413 strippoints = [c.node() for c in repo.set("roots(%ld)",
3402 newnodes)]
3414 newnodes)]
3403 repair.strip(repo.ui, repo, strippoints, backup=False)
3415 repair.strip(repo.ui, repo, strippoints, backup=False)
3404
3416
3405 if not cleanup:
3417 if not cleanup:
3406 # we don't update to the startnode if we can't strip
3418 # we don't update to the startnode if we can't strip
3407 startctx = repo['.']
3419 startctx = repo['.']
3408 hg.updaterepo(repo, startctx.node(), overwrite=True)
3420 hg.updaterepo(repo, startctx.node(), overwrite=True)
3409
3421
3410 ui.status(_("graft aborted\n"))
3422 ui.status(_("graft aborted\n"))
3411 ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
3423 ui.status(_("working directory is now at %s\n") % startctx.hex()[:12])
3412 graftstate.delete()
3424 graftstate.delete()
3413 return 0
3425 return 0
3414
3426
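# Hedged sketch of the cleanup decision above (illustrative only): stripping
# the grafted nodes is allowed only when there are new nodes, none of them
# has become public, and nothing else was committed on top of them.
# 'newrevs' is assumed to be a list of revision numbers; the helper name is
# hypothetical.
def _sketch_maystripaftergraftabort(repo, newrevs):
    if not newrevs:
        return False
    if any(not repo[r].mutable() for r in newrevs):
        return False  # public changesets cannot be stripped
    descendants = set(repo.changelog.descendants(newrevs))
    return not (descendants - set(newrevs))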
3415 def readgraftstate(repo, graftstate):
3427 def readgraftstate(repo, graftstate):
3416 """read the graft state file and return a dict of the data stored in it"""
3428 """read the graft state file and return a dict of the data stored in it"""
3417 try:
3429 try:
3418 return graftstate.read()
3430 return graftstate.read()
3419 except error.CorruptedState:
3431 except error.CorruptedState:
3420 nodes = repo.vfs.read('graftstate').splitlines()
3432 nodes = repo.vfs.read('graftstate').splitlines()
3421 return {'nodes': nodes}
3433 return {'nodes': nodes}
3422
3434
3423 def hgabortgraft(ui, repo):
3435 def hgabortgraft(ui, repo):
3424 """ abort logic for aborting graft using 'hg abort'"""
3436 """ abort logic for aborting graft using 'hg abort'"""
3425 with repo.wlock():
3437 with repo.wlock():
3426 graftstate = statemod.cmdstate(repo, 'graftstate')
3438 graftstate = statemod.cmdstate(repo, 'graftstate')
3427 return abortgraft(ui, repo, graftstate)
3439 return abortgraft(ui, repo, graftstate)
@@ -1,2869 +1,2869 b''
# patch.py - patch file parsing routines
#
# Copyright 2006 Brendan Cully <brendan@kublai.com>
# Copyright 2007 Chris Mason <chris.mason@oracle.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import collections
import contextlib
import copy
import email
import errno
import hashlib
import os
import re
import shutil
import zlib

from .i18n import _
from .node import (
    hex,
    short,
)
from . import (
    copies,
    diffhelper,
    diffutil,
    encoding,
    error,
    mail,
    mdiff,
    pathutil,
    pycompat,
    scmutil,
    similar,
    util,
    vfs as vfsmod,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

stringio = util.stringio

gitre = re.compile(br'diff --git a/(.*) b/(.*)')
tabsplitter = re.compile(br'(\t+|[^\t]+)')
wordsplitter = re.compile(br'(\t+| +|[a-zA-Z0-9_\x80-\xff]+|'
                          b'[^ \ta-zA-Z0-9_\x80-\xff])')

PatchError = error.PatchError

# public functions

59 def split(stream):
59 def split(stream):
60 '''return an iterator of individual patches from a stream'''
60 '''return an iterator of individual patches from a stream'''
61 def isheader(line, inheader):
61 def isheader(line, inheader):
62 if inheader and line.startswith((' ', '\t')):
62 if inheader and line.startswith((' ', '\t')):
63 # continuation
63 # continuation
64 return True
64 return True
65 if line.startswith((' ', '-', '+')):
65 if line.startswith((' ', '-', '+')):
66 # diff line - don't check for header pattern in there
66 # diff line - don't check for header pattern in there
67 return False
67 return False
68 l = line.split(': ', 1)
68 l = line.split(': ', 1)
69 return len(l) == 2 and ' ' not in l[0]
69 return len(l) == 2 and ' ' not in l[0]
70
70
71 def chunk(lines):
71 def chunk(lines):
72 return stringio(''.join(lines))
72 return stringio(''.join(lines))
73
73
74 def hgsplit(stream, cur):
74 def hgsplit(stream, cur):
75 inheader = True
75 inheader = True
76
76
77 for line in stream:
77 for line in stream:
78 if not line.strip():
78 if not line.strip():
79 inheader = False
79 inheader = False
80 if not inheader and line.startswith('# HG changeset patch'):
80 if not inheader and line.startswith('# HG changeset patch'):
81 yield chunk(cur)
81 yield chunk(cur)
82 cur = []
82 cur = []
83 inheader = True
83 inheader = True
84
84
85 cur.append(line)
85 cur.append(line)
86
86
87 if cur:
87 if cur:
88 yield chunk(cur)
88 yield chunk(cur)
89
89
90 def mboxsplit(stream, cur):
90 def mboxsplit(stream, cur):
91 for line in stream:
91 for line in stream:
92 if line.startswith('From '):
92 if line.startswith('From '):
93 for c in split(chunk(cur[1:])):
93 for c in split(chunk(cur[1:])):
94 yield c
94 yield c
95 cur = []
95 cur = []
96
96
97 cur.append(line)
97 cur.append(line)
98
98
99 if cur:
99 if cur:
100 for c in split(chunk(cur[1:])):
100 for c in split(chunk(cur[1:])):
101 yield c
101 yield c
102
102
103 def mimesplit(stream, cur):
103 def mimesplit(stream, cur):
104 def msgfp(m):
104 def msgfp(m):
105 fp = stringio()
105 fp = stringio()
106 g = email.Generator.Generator(fp, mangle_from_=False)
106 g = email.Generator.Generator(fp, mangle_from_=False)
107 g.flatten(m)
107 g.flatten(m)
108 fp.seek(0)
108 fp.seek(0)
109 return fp
109 return fp
110
110
111 for line in stream:
111 for line in stream:
112 cur.append(line)
112 cur.append(line)
113 c = chunk(cur)
113 c = chunk(cur)
114
114
115 m = mail.parse(c)
115 m = mail.parse(c)
116 if not m.is_multipart():
116 if not m.is_multipart():
117 yield msgfp(m)
117 yield msgfp(m)
118 else:
118 else:
119 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
119 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
120 for part in m.walk():
120 for part in m.walk():
121 ct = part.get_content_type()
121 ct = part.get_content_type()
122 if ct not in ok_types:
122 if ct not in ok_types:
123 continue
123 continue
124 yield msgfp(part)
124 yield msgfp(part)
125
125
126 def headersplit(stream, cur):
126 def headersplit(stream, cur):
127 inheader = False
127 inheader = False
128
128
129 for line in stream:
129 for line in stream:
130 if not inheader and isheader(line, inheader):
130 if not inheader and isheader(line, inheader):
131 yield chunk(cur)
131 yield chunk(cur)
132 cur = []
132 cur = []
133 inheader = True
133 inheader = True
134 if inheader and not isheader(line, inheader):
134 if inheader and not isheader(line, inheader):
135 inheader = False
135 inheader = False
136
136
137 cur.append(line)
137 cur.append(line)
138
138
139 if cur:
139 if cur:
140 yield chunk(cur)
140 yield chunk(cur)
141
141
142 def remainder(cur):
142 def remainder(cur):
143 yield chunk(cur)
143 yield chunk(cur)
144
144
145 class fiter(object):
145 class fiter(object):
146 def __init__(self, fp):
146 def __init__(self, fp):
147 self.fp = fp
147 self.fp = fp
148
148
149 def __iter__(self):
149 def __iter__(self):
150 return self
150 return self
151
151
152 def next(self):
152 def next(self):
153 l = self.fp.readline()
153 l = self.fp.readline()
154 if not l:
154 if not l:
155 raise StopIteration
155 raise StopIteration
156 return l
156 return l
157
157
158 __next__ = next
158 __next__ = next
159
159
160 inheader = False
160 inheader = False
161 cur = []
161 cur = []
162
162
163 mimeheaders = ['content-type']
163 mimeheaders = ['content-type']
164
164
165 if not util.safehasattr(stream, 'next'):
165 if not util.safehasattr(stream, 'next'):
166 # http responses, for example, have readline but not next
166 # http responses, for example, have readline but not next
167 stream = fiter(stream)
167 stream = fiter(stream)
168
168
169 for line in stream:
169 for line in stream:
170 cur.append(line)
170 cur.append(line)
171 if line.startswith('# HG changeset patch'):
171 if line.startswith('# HG changeset patch'):
172 return hgsplit(stream, cur)
172 return hgsplit(stream, cur)
173 elif line.startswith('From '):
173 elif line.startswith('From '):
174 return mboxsplit(stream, cur)
174 return mboxsplit(stream, cur)
175 elif isheader(line, inheader):
175 elif isheader(line, inheader):
176 inheader = True
176 inheader = True
177 if line.split(':', 1)[0].lower() in mimeheaders:
177 if line.split(':', 1)[0].lower() in mimeheaders:
178 # let email parser handle this
178 # let email parser handle this
179 return mimesplit(stream, cur)
179 return mimesplit(stream, cur)
180 elif line.startswith('--- ') and inheader:
180 elif line.startswith('--- ') and inheader:
181 # No evil headers seen by diff start, split by hand
181 # No evil headers seen by diff start, split by hand
182 return headersplit(stream, cur)
182 return headersplit(stream, cur)
183 # Not enough info, keep reading
183 # Not enough info, keep reading
184
184
185 # if we are here, we have a very plain patch
185 # if we are here, we have a very plain patch
186 return remainder(cur)
186 return remainder(cur)
187
187
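# Editorial sketch (not part of the original file): how split() above is
# typically driven. The file name and the bytes-mode open are assumptions;
# split() only needs a readable/iterable stream of (byte) lines and yields
# one file-like object per patch it finds.
def _example_split_usage(path='incoming.patch'):
    with open(path, 'rb') as fp:
        for i, chunk in enumerate(split(fp)):
            data = chunk.read()  # each yielded chunk supports read()
            print('patch %d: %d bytes' % (i, len(data)))
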
188 ## Some facility for extensible patch parsing:
188 ## Some facility for extensible patch parsing:
189 # list of pairs ("header to match", "data key")
189 # list of pairs ("header to match", "data key")
190 patchheadermap = [('Date', 'date'),
190 patchheadermap = [('Date', 'date'),
191 ('Branch', 'branch'),
191 ('Branch', 'branch'),
192 ('Node ID', 'nodeid'),
192 ('Node ID', 'nodeid'),
193 ]
193 ]
194
194
195 @contextlib.contextmanager
195 @contextlib.contextmanager
196 def extract(ui, fileobj):
196 def extract(ui, fileobj):
197 '''extract patch from data read from fileobj.
197 '''extract patch from data read from fileobj.
198
198
199 patch can be a normal patch or contained in an email message.
199 patch can be a normal patch or contained in an email message.
200
200
201 return a dictionary. Standard keys are:
201 return a dictionary. Standard keys are:
202 - filename,
202 - filename,
203 - message,
203 - message,
204 - user,
204 - user,
205 - date,
205 - date,
206 - branch,
206 - branch,
207 - node,
207 - node,
208 - p1,
208 - p1,
209 - p2.
209 - p2.
210 Any item can be missing from the dictionary. If filename is missing,
210 Any item can be missing from the dictionary. If filename is missing,
211 fileobj did not contain a patch. Caller must unlink filename when done.'''
211 fileobj did not contain a patch. Caller must unlink filename when done.'''
212
212
213 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
213 fd, tmpname = pycompat.mkstemp(prefix='hg-patch-')
214 tmpfp = os.fdopen(fd, r'wb')
214 tmpfp = os.fdopen(fd, r'wb')
215 try:
215 try:
216 yield _extract(ui, fileobj, tmpname, tmpfp)
216 yield _extract(ui, fileobj, tmpname, tmpfp)
217 finally:
217 finally:
218 tmpfp.close()
218 tmpfp.close()
219 os.unlink(tmpname)
219 os.unlink(tmpname)
220
220
221 def _extract(ui, fileobj, tmpname, tmpfp):
221 def _extract(ui, fileobj, tmpname, tmpfp):
222
222
223 # attempt to detect the start of a patch
223 # attempt to detect the start of a patch
224 # (this heuristic is borrowed from quilt)
224 # (this heuristic is borrowed from quilt)
225 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
225 diffre = re.compile(br'^(?:Index:[ \t]|diff[ \t]-|RCS file: |'
226 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
226 br'retrieving revision [0-9]+(\.[0-9]+)*$|'
227 br'---[ \t].*?^\+\+\+[ \t]|'
227 br'---[ \t].*?^\+\+\+[ \t]|'
228 br'\*\*\*[ \t].*?^---[ \t])',
228 br'\*\*\*[ \t].*?^---[ \t])',
229 re.MULTILINE | re.DOTALL)
229 re.MULTILINE | re.DOTALL)
230
230
231 data = {}
231 data = {}
232
232
233 msg = mail.parse(fileobj)
233 msg = mail.parse(fileobj)
234
234
235 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
235 subject = msg[r'Subject'] and mail.headdecode(msg[r'Subject'])
236 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
236 data['user'] = msg[r'From'] and mail.headdecode(msg[r'From'])
237 if not subject and not data['user']:
237 if not subject and not data['user']:
238 # Not an email, restore parsed headers if any
238 # Not an email, restore parsed headers if any
239 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
239 subject = '\n'.join(': '.join(map(encoding.strtolocal, h))
240 for h in msg.items()) + '\n'
240 for h in msg.items()) + '\n'
241
241
242 # should try to parse msg['Date']
242 # should try to parse msg['Date']
243 parents = []
243 parents = []
244
244
245 if subject:
245 if subject:
246 if subject.startswith('[PATCH'):
246 if subject.startswith('[PATCH'):
247 pend = subject.find(']')
247 pend = subject.find(']')
248 if pend >= 0:
248 if pend >= 0:
249 subject = subject[pend + 1:].lstrip()
249 subject = subject[pend + 1:].lstrip()
250 subject = re.sub(br'\n[ \t]+', ' ', subject)
250 subject = re.sub(br'\n[ \t]+', ' ', subject)
251 ui.debug('Subject: %s\n' % subject)
251 ui.debug('Subject: %s\n' % subject)
252 if data['user']:
252 if data['user']:
253 ui.debug('From: %s\n' % data['user'])
253 ui.debug('From: %s\n' % data['user'])
254 diffs_seen = 0
254 diffs_seen = 0
255 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
255 ok_types = ('text/plain', 'text/x-diff', 'text/x-patch')
256 message = ''
256 message = ''
257 for part in msg.walk():
257 for part in msg.walk():
258 content_type = pycompat.bytestr(part.get_content_type())
258 content_type = pycompat.bytestr(part.get_content_type())
259 ui.debug('Content-Type: %s\n' % content_type)
259 ui.debug('Content-Type: %s\n' % content_type)
260 if content_type not in ok_types:
260 if content_type not in ok_types:
261 continue
261 continue
262 payload = part.get_payload(decode=True)
262 payload = part.get_payload(decode=True)
263 m = diffre.search(payload)
263 m = diffre.search(payload)
264 if m:
264 if m:
265 hgpatch = False
265 hgpatch = False
266 hgpatchheader = False
266 hgpatchheader = False
267 ignoretext = False
267 ignoretext = False
268
268
269 ui.debug('found patch at byte %d\n' % m.start(0))
269 ui.debug('found patch at byte %d\n' % m.start(0))
270 diffs_seen += 1
270 diffs_seen += 1
271 cfp = stringio()
271 cfp = stringio()
272 for line in payload[:m.start(0)].splitlines():
272 for line in payload[:m.start(0)].splitlines():
273 if line.startswith('# HG changeset patch') and not hgpatch:
273 if line.startswith('# HG changeset patch') and not hgpatch:
274 ui.debug('patch generated by hg export\n')
274 ui.debug('patch generated by hg export\n')
275 hgpatch = True
275 hgpatch = True
276 hgpatchheader = True
276 hgpatchheader = True
277 # drop earlier commit message content
277 # drop earlier commit message content
278 cfp.seek(0)
278 cfp.seek(0)
279 cfp.truncate()
279 cfp.truncate()
280 subject = None
280 subject = None
281 elif hgpatchheader:
281 elif hgpatchheader:
282 if line.startswith('# User '):
282 if line.startswith('# User '):
283 data['user'] = line[7:]
283 data['user'] = line[7:]
284 ui.debug('From: %s\n' % data['user'])
284 ui.debug('From: %s\n' % data['user'])
285 elif line.startswith("# Parent "):
285 elif line.startswith("# Parent "):
286 parents.append(line[9:].lstrip())
286 parents.append(line[9:].lstrip())
287 elif line.startswith("# "):
287 elif line.startswith("# "):
288 for header, key in patchheadermap:
288 for header, key in patchheadermap:
289 prefix = '# %s ' % header
289 prefix = '# %s ' % header
290 if line.startswith(prefix):
290 if line.startswith(prefix):
291 data[key] = line[len(prefix):]
291 data[key] = line[len(prefix):]
292 else:
292 else:
293 hgpatchheader = False
293 hgpatchheader = False
294 elif line == '---':
294 elif line == '---':
295 ignoretext = True
295 ignoretext = True
296 if not hgpatchheader and not ignoretext:
296 if not hgpatchheader and not ignoretext:
297 cfp.write(line)
297 cfp.write(line)
298 cfp.write('\n')
298 cfp.write('\n')
299 message = cfp.getvalue()
299 message = cfp.getvalue()
300 if tmpfp:
300 if tmpfp:
301 tmpfp.write(payload)
301 tmpfp.write(payload)
302 if not payload.endswith('\n'):
302 if not payload.endswith('\n'):
303 tmpfp.write('\n')
303 tmpfp.write('\n')
304 elif not diffs_seen and message and content_type == 'text/plain':
304 elif not diffs_seen and message and content_type == 'text/plain':
305 message += '\n' + payload
305 message += '\n' + payload
306
306
307 if subject and not message.startswith(subject):
307 if subject and not message.startswith(subject):
308 message = '%s\n%s' % (subject, message)
308 message = '%s\n%s' % (subject, message)
309 data['message'] = message
309 data['message'] = message
310 tmpfp.close()
310 tmpfp.close()
311 if parents:
311 if parents:
312 data['p1'] = parents.pop(0)
312 data['p1'] = parents.pop(0)
313 if parents:
313 if parents:
314 data['p2'] = parents.pop(0)
314 data['p2'] = parents.pop(0)
315
315
316 if diffs_seen:
316 if diffs_seen:
317 data['filename'] = tmpname
317 data['filename'] = tmpname
318
318
319 return data
319 return data
320
320
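# Editorial sketch (not part of the original file): extract() above is a
# context manager yielding the dict described in its docstring. The ui
# object and the file name are assumptions; 'filename' is only present
# when an actual diff was detected in the input.
def _example_extract_usage(ui, path='mail-with-patch.eml'):
    with open(path, 'rb') as fp:
        with extract(ui, fp) as data:
            if 'filename' in data:
                ui.write('patch body written to %s\n' % data['filename'])
            ui.write('user: %s\n' % (data.get('user') or 'unknown'))
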
321 class patchmeta(object):
321 class patchmeta(object):
322 """Patched file metadata
322 """Patched file metadata
323
323
324 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
324 'op' is the performed operation within ADD, DELETE, RENAME, MODIFY
325 or COPY. 'path' is patched file path. 'oldpath' is set to the
325 or COPY. 'path' is patched file path. 'oldpath' is set to the
326 origin file when 'op' is either COPY or RENAME, None otherwise. If
326 origin file when 'op' is either COPY or RENAME, None otherwise. If
327 file mode is changed, 'mode' is a tuple (islink, isexec) where
327 file mode is changed, 'mode' is a tuple (islink, isexec) where
328 'islink' is True if the file is a symlink and 'isexec' is True if
328 'islink' is True if the file is a symlink and 'isexec' is True if
329 the file is executable. Otherwise, 'mode' is None.
329 the file is executable. Otherwise, 'mode' is None.
330 """
330 """
331 def __init__(self, path):
331 def __init__(self, path):
332 self.path = path
332 self.path = path
333 self.oldpath = None
333 self.oldpath = None
334 self.mode = None
334 self.mode = None
335 self.op = 'MODIFY'
335 self.op = 'MODIFY'
336 self.binary = False
336 self.binary = False
337
337
338 def setmode(self, mode):
338 def setmode(self, mode):
339 islink = mode & 0o20000
339 islink = mode & 0o20000
340 isexec = mode & 0o100
340 isexec = mode & 0o100
341 self.mode = (islink, isexec)
341 self.mode = (islink, isexec)
342
342
343 def copy(self):
343 def copy(self):
344 other = patchmeta(self.path)
344 other = patchmeta(self.path)
345 other.oldpath = self.oldpath
345 other.oldpath = self.oldpath
346 other.mode = self.mode
346 other.mode = self.mode
347 other.op = self.op
347 other.op = self.op
348 other.binary = self.binary
348 other.binary = self.binary
349 return other
349 return other
350
350
351 def _ispatchinga(self, afile):
351 def _ispatchinga(self, afile):
352 if afile == '/dev/null':
352 if afile == '/dev/null':
353 return self.op == 'ADD'
353 return self.op == 'ADD'
354 return afile == 'a/' + (self.oldpath or self.path)
354 return afile == 'a/' + (self.oldpath or self.path)
355
355
356 def _ispatchingb(self, bfile):
356 def _ispatchingb(self, bfile):
357 if bfile == '/dev/null':
357 if bfile == '/dev/null':
358 return self.op == 'DELETE'
358 return self.op == 'DELETE'
359 return bfile == 'b/' + self.path
359 return bfile == 'b/' + self.path
360
360
361 def ispatching(self, afile, bfile):
361 def ispatching(self, afile, bfile):
362 return self._ispatchinga(afile) and self._ispatchingb(bfile)
362 return self._ispatchinga(afile) and self._ispatchingb(bfile)
363
363
364 def __repr__(self):
364 def __repr__(self):
365 return r"<patchmeta %s %r>" % (self.op, self.path)
365 return r"<patchmeta %s %r>" % (self.op, self.path)
366
366
367 def readgitpatch(lr):
367 def readgitpatch(lr):
368 """extract git-style metadata about patches from <patchname>"""
368 """extract git-style metadata about patches from <patchname>"""
369
369
370 # Filter patch for git information
370 # Filter patch for git information
371 gp = None
371 gp = None
372 gitpatches = []
372 gitpatches = []
373 for line in lr:
373 for line in lr:
374 line = line.rstrip(' \r\n')
374 line = line.rstrip(' \r\n')
375 if line.startswith('diff --git a/'):
375 if line.startswith('diff --git a/'):
376 m = gitre.match(line)
376 m = gitre.match(line)
377 if m:
377 if m:
378 if gp:
378 if gp:
379 gitpatches.append(gp)
379 gitpatches.append(gp)
380 dst = m.group(2)
380 dst = m.group(2)
381 gp = patchmeta(dst)
381 gp = patchmeta(dst)
382 elif gp:
382 elif gp:
383 if line.startswith('--- '):
383 if line.startswith('--- '):
384 gitpatches.append(gp)
384 gitpatches.append(gp)
385 gp = None
385 gp = None
386 continue
386 continue
387 if line.startswith('rename from '):
387 if line.startswith('rename from '):
388 gp.op = 'RENAME'
388 gp.op = 'RENAME'
389 gp.oldpath = line[12:]
389 gp.oldpath = line[12:]
390 elif line.startswith('rename to '):
390 elif line.startswith('rename to '):
391 gp.path = line[10:]
391 gp.path = line[10:]
392 elif line.startswith('copy from '):
392 elif line.startswith('copy from '):
393 gp.op = 'COPY'
393 gp.op = 'COPY'
394 gp.oldpath = line[10:]
394 gp.oldpath = line[10:]
395 elif line.startswith('copy to '):
395 elif line.startswith('copy to '):
396 gp.path = line[8:]
396 gp.path = line[8:]
397 elif line.startswith('deleted file'):
397 elif line.startswith('deleted file'):
398 gp.op = 'DELETE'
398 gp.op = 'DELETE'
399 elif line.startswith('new file mode '):
399 elif line.startswith('new file mode '):
400 gp.op = 'ADD'
400 gp.op = 'ADD'
401 gp.setmode(int(line[-6:], 8))
401 gp.setmode(int(line[-6:], 8))
402 elif line.startswith('new mode '):
402 elif line.startswith('new mode '):
403 gp.setmode(int(line[-6:], 8))
403 gp.setmode(int(line[-6:], 8))
404 elif line.startswith('GIT binary patch'):
404 elif line.startswith('GIT binary patch'):
405 gp.binary = True
405 gp.binary = True
406 if gp:
406 if gp:
407 gitpatches.append(gp)
407 gitpatches.append(gp)
408
408
409 return gitpatches
409 return gitpatches
410
410
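# Editorial sketch (not part of the original file): what readgitpatch()
# above returns for a git-style rename header. The byte-string lines are a
# made-up minimal diff, matching the module's byte-oriented parsing; real
# input normally comes through a linereader.
def _example_readgitpatch_rename():
    lines = [
        b'diff --git a/old.txt b/new.txt',
        b'rename from old.txt',
        b'rename to new.txt',
    ]
    gp = readgitpatch(lines)[0]
    # gp.op == 'RENAME', gp.path == 'new.txt', gp.oldpath == 'old.txt'
    return gp
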
411 class linereader(object):
411 class linereader(object):
412 # simple class to allow pushing lines back into the input stream
412 # simple class to allow pushing lines back into the input stream
413 def __init__(self, fp):
413 def __init__(self, fp):
414 self.fp = fp
414 self.fp = fp
415 self.buf = []
415 self.buf = []
416
416
417 def push(self, line):
417 def push(self, line):
418 if line is not None:
418 if line is not None:
419 self.buf.append(line)
419 self.buf.append(line)
420
420
421 def readline(self):
421 def readline(self):
422 if self.buf:
422 if self.buf:
423 l = self.buf[0]
423 l = self.buf[0]
424 del self.buf[0]
424 del self.buf[0]
425 return l
425 return l
426 return self.fp.readline()
426 return self.fp.readline()
427
427
428 def __iter__(self):
428 def __iter__(self):
429 return iter(self.readline, '')
429 return iter(self.readline, '')
430
430
431 class abstractbackend(object):
431 class abstractbackend(object):
432 def __init__(self, ui):
432 def __init__(self, ui):
433 self.ui = ui
433 self.ui = ui
434
434
435 def getfile(self, fname):
435 def getfile(self, fname):
436 """Return target file data and flags as a (data, (islink,
436 """Return target file data and flags as a (data, (islink,
437 isexec)) tuple. Data is None if file is missing/deleted.
437 isexec)) tuple. Data is None if file is missing/deleted.
438 """
438 """
439 raise NotImplementedError
439 raise NotImplementedError
440
440
441 def setfile(self, fname, data, mode, copysource):
441 def setfile(self, fname, data, mode, copysource):
442 """Write data to target file fname and set its mode. mode is a
442 """Write data to target file fname and set its mode. mode is a
443 (islink, isexec) tuple. If data is None, the file content should
443 (islink, isexec) tuple. If data is None, the file content should
444 be left unchanged. If the file is modified after being copied,
444 be left unchanged. If the file is modified after being copied,
445 copysource is set to the original file name.
445 copysource is set to the original file name.
446 """
446 """
447 raise NotImplementedError
447 raise NotImplementedError
448
448
449 def unlink(self, fname):
449 def unlink(self, fname):
450 """Unlink target file."""
450 """Unlink target file."""
451 raise NotImplementedError
451 raise NotImplementedError
452
452
453 def writerej(self, fname, failed, total, lines):
453 def writerej(self, fname, failed, total, lines):
454 """Write rejected lines for fname. total is the number of hunks
454 """Write rejected lines for fname. total is the number of hunks
455 which failed to apply and total the total number of hunks for this
455 which failed to apply and total the total number of hunks for this
456 files.
456 files.
457 """
457 """
458
458
459 def exists(self, fname):
459 def exists(self, fname):
460 raise NotImplementedError
460 raise NotImplementedError
461
461
462 def close(self):
462 def close(self):
463 raise NotImplementedError
463 raise NotImplementedError
464
464
465 class fsbackend(abstractbackend):
465 class fsbackend(abstractbackend):
466 def __init__(self, ui, basedir):
466 def __init__(self, ui, basedir):
467 super(fsbackend, self).__init__(ui)
467 super(fsbackend, self).__init__(ui)
468 self.opener = vfsmod.vfs(basedir)
468 self.opener = vfsmod.vfs(basedir)
469
469
470 def getfile(self, fname):
470 def getfile(self, fname):
471 if self.opener.islink(fname):
471 if self.opener.islink(fname):
472 return (self.opener.readlink(fname), (True, False))
472 return (self.opener.readlink(fname), (True, False))
473
473
474 isexec = False
474 isexec = False
475 try:
475 try:
476 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
476 isexec = self.opener.lstat(fname).st_mode & 0o100 != 0
477 except OSError as e:
477 except OSError as e:
478 if e.errno != errno.ENOENT:
478 if e.errno != errno.ENOENT:
479 raise
479 raise
480 try:
480 try:
481 return (self.opener.read(fname), (False, isexec))
481 return (self.opener.read(fname), (False, isexec))
482 except IOError as e:
482 except IOError as e:
483 if e.errno != errno.ENOENT:
483 if e.errno != errno.ENOENT:
484 raise
484 raise
485 return None, None
485 return None, None
486
486
487 def setfile(self, fname, data, mode, copysource):
487 def setfile(self, fname, data, mode, copysource):
488 islink, isexec = mode
488 islink, isexec = mode
489 if data is None:
489 if data is None:
490 self.opener.setflags(fname, islink, isexec)
490 self.opener.setflags(fname, islink, isexec)
491 return
491 return
492 if islink:
492 if islink:
493 self.opener.symlink(data, fname)
493 self.opener.symlink(data, fname)
494 else:
494 else:
495 self.opener.write(fname, data)
495 self.opener.write(fname, data)
496 if isexec:
496 if isexec:
497 self.opener.setflags(fname, False, True)
497 self.opener.setflags(fname, False, True)
498
498
499 def unlink(self, fname):
499 def unlink(self, fname):
500 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
500 rmdir = self.ui.configbool('experimental', 'removeemptydirs')
501 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
501 self.opener.unlinkpath(fname, ignoremissing=True, rmdir=rmdir)
502
502
503 def writerej(self, fname, failed, total, lines):
503 def writerej(self, fname, failed, total, lines):
504 fname = fname + ".rej"
504 fname = fname + ".rej"
505 self.ui.warn(
505 self.ui.warn(
506 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
506 _("%d out of %d hunks FAILED -- saving rejects to file %s\n") %
507 (failed, total, fname))
507 (failed, total, fname))
508 fp = self.opener(fname, 'w')
508 fp = self.opener(fname, 'w')
509 fp.writelines(lines)
509 fp.writelines(lines)
510 fp.close()
510 fp.close()
511
511
512 def exists(self, fname):
512 def exists(self, fname):
513 return self.opener.lexists(fname)
513 return self.opener.lexists(fname)
514
514
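# Editorial sketch (not part of the original file): the getfile() contract
# shared by the backends above. It returns (data, (islink, isexec)) on
# success and (None, None) when the file is missing; the base directory
# and file name here are assumptions.
def _example_fsbackend_getfile(ui, basedir='.'):
    backend = fsbackend(ui, basedir)
    data, mode = backend.getfile('README')
    if data is None:
        ui.write('README not present\n')
    else:
        islink, isexec = mode
        ui.write('README: %d bytes, exec=%r\n' % (len(data), bool(isexec)))
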
515 class workingbackend(fsbackend):
515 class workingbackend(fsbackend):
516 def __init__(self, ui, repo, similarity):
516 def __init__(self, ui, repo, similarity):
517 super(workingbackend, self).__init__(ui, repo.root)
517 super(workingbackend, self).__init__(ui, repo.root)
518 self.repo = repo
518 self.repo = repo
519 self.similarity = similarity
519 self.similarity = similarity
520 self.removed = set()
520 self.removed = set()
521 self.changed = set()
521 self.changed = set()
522 self.copied = []
522 self.copied = []
523
523
524 def _checkknown(self, fname):
524 def _checkknown(self, fname):
525 if self.repo.dirstate[fname] == '?' and self.exists(fname):
525 if self.repo.dirstate[fname] == '?' and self.exists(fname):
526 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
526 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
527
527
528 def setfile(self, fname, data, mode, copysource):
528 def setfile(self, fname, data, mode, copysource):
529 self._checkknown(fname)
529 self._checkknown(fname)
530 super(workingbackend, self).setfile(fname, data, mode, copysource)
530 super(workingbackend, self).setfile(fname, data, mode, copysource)
531 if copysource is not None:
531 if copysource is not None:
532 self.copied.append((copysource, fname))
532 self.copied.append((copysource, fname))
533 self.changed.add(fname)
533 self.changed.add(fname)
534
534
535 def unlink(self, fname):
535 def unlink(self, fname):
536 self._checkknown(fname)
536 self._checkknown(fname)
537 super(workingbackend, self).unlink(fname)
537 super(workingbackend, self).unlink(fname)
538 self.removed.add(fname)
538 self.removed.add(fname)
539 self.changed.add(fname)
539 self.changed.add(fname)
540
540
541 def close(self):
541 def close(self):
542 wctx = self.repo[None]
542 wctx = self.repo[None]
543 changed = set(self.changed)
543 changed = set(self.changed)
544 for src, dst in self.copied:
544 for src, dst in self.copied:
545 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
545 scmutil.dirstatecopy(self.ui, self.repo, wctx, src, dst)
546 if self.removed:
546 if self.removed:
547 wctx.forget(sorted(self.removed))
547 wctx.forget(sorted(self.removed))
548 for f in self.removed:
548 for f in self.removed:
549 if f not in self.repo.dirstate:
549 if f not in self.repo.dirstate:
550 # File was deleted and no longer belongs to the
550 # File was deleted and no longer belongs to the
551 # dirstate, it was probably marked added then
551 # dirstate, it was probably marked added then
552 # deleted, and should not be considered by
552 # deleted, and should not be considered by
553 # marktouched().
553 # marktouched().
554 changed.discard(f)
554 changed.discard(f)
555 if changed:
555 if changed:
556 scmutil.marktouched(self.repo, changed, self.similarity)
556 scmutil.marktouched(self.repo, changed, self.similarity)
557 return sorted(self.changed)
557 return sorted(self.changed)
558
558
559 class filestore(object):
559 class filestore(object):
560 def __init__(self, maxsize=None):
560 def __init__(self, maxsize=None):
561 self.opener = None
561 self.opener = None
562 self.files = {}
562 self.files = {}
563 self.created = 0
563 self.created = 0
564 self.maxsize = maxsize
564 self.maxsize = maxsize
565 if self.maxsize is None:
565 if self.maxsize is None:
566 self.maxsize = 4*(2**20)
566 self.maxsize = 4*(2**20)
567 self.size = 0
567 self.size = 0
568 self.data = {}
568 self.data = {}
569
569
570 def setfile(self, fname, data, mode, copied=None):
570 def setfile(self, fname, data, mode, copied=None):
571 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
571 if self.maxsize < 0 or (len(data) + self.size) <= self.maxsize:
572 self.data[fname] = (data, mode, copied)
572 self.data[fname] = (data, mode, copied)
573 self.size += len(data)
573 self.size += len(data)
574 else:
574 else:
575 if self.opener is None:
575 if self.opener is None:
576 root = pycompat.mkdtemp(prefix='hg-patch-')
576 root = pycompat.mkdtemp(prefix='hg-patch-')
577 self.opener = vfsmod.vfs(root)
577 self.opener = vfsmod.vfs(root)
578 # Avoid filename issues with these simple names
578 # Avoid filename issues with these simple names
579 fn = '%d' % self.created
579 fn = '%d' % self.created
580 self.opener.write(fn, data)
580 self.opener.write(fn, data)
581 self.created += 1
581 self.created += 1
582 self.files[fname] = (fn, mode, copied)
582 self.files[fname] = (fn, mode, copied)
583
583
584 def getfile(self, fname):
584 def getfile(self, fname):
585 if fname in self.data:
585 if fname in self.data:
586 return self.data[fname]
586 return self.data[fname]
587 if not self.opener or fname not in self.files:
587 if not self.opener or fname not in self.files:
588 return None, None, None
588 return None, None, None
589 fn, mode, copied = self.files[fname]
589 fn, mode, copied = self.files[fname]
590 return self.opener.read(fn), mode, copied
590 return self.opener.read(fn), mode, copied
591
591
592 def close(self):
592 def close(self):
593 if self.opener:
593 if self.opener:
594 shutil.rmtree(self.opener.base)
594 shutil.rmtree(self.opener.base)
595
595
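# Editorial sketch (not part of the original file): filestore keeps small
# files in memory and spills anything that would push it past maxsize
# (4 MiB by default) into a temporary directory removed by close().
def _example_filestore_spill():
    store = filestore(maxsize=16)
    store.setfile(b'small', b'tiny', (False, False))    # stays in memory
    store.setfile(b'large', b'x' * 64, (False, False))  # written to a temp vfs
    data, mode, copied = store.getfile(b'large')
    store.close()                                        # deletes the temp dir
    return len(data)                                     # 64
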
596 class repobackend(abstractbackend):
596 class repobackend(abstractbackend):
597 def __init__(self, ui, repo, ctx, store):
597 def __init__(self, ui, repo, ctx, store):
598 super(repobackend, self).__init__(ui)
598 super(repobackend, self).__init__(ui)
599 self.repo = repo
599 self.repo = repo
600 self.ctx = ctx
600 self.ctx = ctx
601 self.store = store
601 self.store = store
602 self.changed = set()
602 self.changed = set()
603 self.removed = set()
603 self.removed = set()
604 self.copied = {}
604 self.copied = {}
605
605
606 def _checkknown(self, fname):
606 def _checkknown(self, fname):
607 if fname not in self.ctx:
607 if fname not in self.ctx:
608 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
608 raise PatchError(_('cannot patch %s: file is not tracked') % fname)
609
609
610 def getfile(self, fname):
610 def getfile(self, fname):
611 try:
611 try:
612 fctx = self.ctx[fname]
612 fctx = self.ctx[fname]
613 except error.LookupError:
613 except error.LookupError:
614 return None, None
614 return None, None
615 flags = fctx.flags()
615 flags = fctx.flags()
616 return fctx.data(), ('l' in flags, 'x' in flags)
616 return fctx.data(), ('l' in flags, 'x' in flags)
617
617
618 def setfile(self, fname, data, mode, copysource):
618 def setfile(self, fname, data, mode, copysource):
619 if copysource:
619 if copysource:
620 self._checkknown(copysource)
620 self._checkknown(copysource)
621 if data is None:
621 if data is None:
622 data = self.ctx[fname].data()
622 data = self.ctx[fname].data()
623 self.store.setfile(fname, data, mode, copysource)
623 self.store.setfile(fname, data, mode, copysource)
624 self.changed.add(fname)
624 self.changed.add(fname)
625 if copysource:
625 if copysource:
626 self.copied[fname] = copysource
626 self.copied[fname] = copysource
627
627
628 def unlink(self, fname):
628 def unlink(self, fname):
629 self._checkknown(fname)
629 self._checkknown(fname)
630 self.removed.add(fname)
630 self.removed.add(fname)
631
631
632 def exists(self, fname):
632 def exists(self, fname):
633 return fname in self.ctx
633 return fname in self.ctx
634
634
635 def close(self):
635 def close(self):
636 return self.changed | self.removed
636 return self.changed | self.removed
637
637
638 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
638 # @@ -start,len +start,len @@ or @@ -start +start @@ if len is 1
639 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
639 unidesc = re.compile(br'@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@')
640 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
640 contextdesc = re.compile(br'(?:---|\*\*\*) (\d+)(?:,(\d+))? (?:---|\*\*\*)')
641 eolmodes = ['strict', 'crlf', 'lf', 'auto']
641 eolmodes = ['strict', 'crlf', 'lf', 'auto']
642
642
643 class patchfile(object):
643 class patchfile(object):
644 def __init__(self, ui, gp, backend, store, eolmode='strict'):
644 def __init__(self, ui, gp, backend, store, eolmode='strict'):
645 self.fname = gp.path
645 self.fname = gp.path
646 self.eolmode = eolmode
646 self.eolmode = eolmode
647 self.eol = None
647 self.eol = None
648 self.backend = backend
648 self.backend = backend
649 self.ui = ui
649 self.ui = ui
650 self.lines = []
650 self.lines = []
651 self.exists = False
651 self.exists = False
652 self.missing = True
652 self.missing = True
653 self.mode = gp.mode
653 self.mode = gp.mode
654 self.copysource = gp.oldpath
654 self.copysource = gp.oldpath
655 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
655 self.create = gp.op in ('ADD', 'COPY', 'RENAME')
656 self.remove = gp.op == 'DELETE'
656 self.remove = gp.op == 'DELETE'
657 if self.copysource is None:
657 if self.copysource is None:
658 data, mode = backend.getfile(self.fname)
658 data, mode = backend.getfile(self.fname)
659 else:
659 else:
660 data, mode = store.getfile(self.copysource)[:2]
660 data, mode = store.getfile(self.copysource)[:2]
661 if data is not None:
661 if data is not None:
662 self.exists = self.copysource is None or backend.exists(self.fname)
662 self.exists = self.copysource is None or backend.exists(self.fname)
663 self.missing = False
663 self.missing = False
664 if data:
664 if data:
665 self.lines = mdiff.splitnewlines(data)
665 self.lines = mdiff.splitnewlines(data)
666 if self.mode is None:
666 if self.mode is None:
667 self.mode = mode
667 self.mode = mode
668 if self.lines:
668 if self.lines:
669 # Normalize line endings
669 # Normalize line endings
670 if self.lines[0].endswith('\r\n'):
670 if self.lines[0].endswith('\r\n'):
671 self.eol = '\r\n'
671 self.eol = '\r\n'
672 elif self.lines[0].endswith('\n'):
672 elif self.lines[0].endswith('\n'):
673 self.eol = '\n'
673 self.eol = '\n'
674 if eolmode != 'strict':
674 if eolmode != 'strict':
675 nlines = []
675 nlines = []
676 for l in self.lines:
676 for l in self.lines:
677 if l.endswith('\r\n'):
677 if l.endswith('\r\n'):
678 l = l[:-2] + '\n'
678 l = l[:-2] + '\n'
679 nlines.append(l)
679 nlines.append(l)
680 self.lines = nlines
680 self.lines = nlines
681 else:
681 else:
682 if self.create:
682 if self.create:
683 self.missing = False
683 self.missing = False
684 if self.mode is None:
684 if self.mode is None:
685 self.mode = (False, False)
685 self.mode = (False, False)
686 if self.missing:
686 if self.missing:
687 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
687 self.ui.warn(_("unable to find '%s' for patching\n") % self.fname)
688 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
688 self.ui.warn(_("(use '--prefix' to apply patch relative to the "
689 "current directory)\n"))
689 "current directory)\n"))
690
690
691 self.hash = {}
691 self.hash = {}
692 self.dirty = 0
692 self.dirty = 0
693 self.offset = 0
693 self.offset = 0
694 self.skew = 0
694 self.skew = 0
695 self.rej = []
695 self.rej = []
696 self.fileprinted = False
696 self.fileprinted = False
697 self.printfile(False)
697 self.printfile(False)
698 self.hunks = 0
698 self.hunks = 0
699
699
700 def writelines(self, fname, lines, mode):
700 def writelines(self, fname, lines, mode):
701 if self.eolmode == 'auto':
701 if self.eolmode == 'auto':
702 eol = self.eol
702 eol = self.eol
703 elif self.eolmode == 'crlf':
703 elif self.eolmode == 'crlf':
704 eol = '\r\n'
704 eol = '\r\n'
705 else:
705 else:
706 eol = '\n'
706 eol = '\n'
707
707
708 if self.eolmode != 'strict' and eol and eol != '\n':
708 if self.eolmode != 'strict' and eol and eol != '\n':
709 rawlines = []
709 rawlines = []
710 for l in lines:
710 for l in lines:
711 if l and l.endswith('\n'):
711 if l and l.endswith('\n'):
712 l = l[:-1] + eol
712 l = l[:-1] + eol
713 rawlines.append(l)
713 rawlines.append(l)
714 lines = rawlines
714 lines = rawlines
715
715
716 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
716 self.backend.setfile(fname, ''.join(lines), mode, self.copysource)
717
717
718 def printfile(self, warn):
718 def printfile(self, warn):
719 if self.fileprinted:
719 if self.fileprinted:
720 return
720 return
721 if warn or self.ui.verbose:
721 if warn or self.ui.verbose:
722 self.fileprinted = True
722 self.fileprinted = True
723 s = _("patching file %s\n") % self.fname
723 s = _("patching file %s\n") % self.fname
724 if warn:
724 if warn:
725 self.ui.warn(s)
725 self.ui.warn(s)
726 else:
726 else:
727 self.ui.note(s)
727 self.ui.note(s)
728
728
729
729
730 def findlines(self, l, linenum):
730 def findlines(self, l, linenum):
731 # looks through the hash and finds candidate lines. The
731 # looks through the hash and finds candidate lines. The
732 # result is a list of line numbers sorted based on distance
732 # result is a list of line numbers sorted based on distance
733 # from linenum
733 # from linenum
734
734
735 cand = self.hash.get(l, [])
735 cand = self.hash.get(l, [])
736 if len(cand) > 1:
736 if len(cand) > 1:
737 # resort our list of potentials forward then back.
737 # resort our list of potentials forward then back.
738 cand.sort(key=lambda x: abs(x - linenum))
738 cand.sort(key=lambda x: abs(x - linenum))
739 return cand
739 return cand
740
740
741 def write_rej(self):
741 def write_rej(self):
742 # our rejects are a little different from patch(1). This always
742 # our rejects are a little different from patch(1). This always
743 # creates rejects in the same form as the original patch. A file
743 # creates rejects in the same form as the original patch. A file
744 # header is inserted so that you can run the reject through patch again
744 # header is inserted so that you can run the reject through patch again
745 # without having to type the filename.
745 # without having to type the filename.
746 if not self.rej:
746 if not self.rej:
747 return
747 return
748 base = os.path.basename(self.fname)
748 base = os.path.basename(self.fname)
749 lines = ["--- %s\n+++ %s\n" % (base, base)]
749 lines = ["--- %s\n+++ %s\n" % (base, base)]
750 for x in self.rej:
750 for x in self.rej:
751 for l in x.hunk:
751 for l in x.hunk:
752 lines.append(l)
752 lines.append(l)
753 if l[-1:] != '\n':
753 if l[-1:] != '\n':
754 lines.append("\n\\ No newline at end of file\n")
754 lines.append("\n\\ No newline at end of file\n")
755 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
755 self.backend.writerej(self.fname, len(self.rej), self.hunks, lines)
756
756
757 def apply(self, h):
757 def apply(self, h):
758 if not h.complete():
758 if not h.complete():
759 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
759 raise PatchError(_("bad hunk #%d %s (%d %d %d %d)") %
760 (h.number, h.desc, len(h.a), h.lena, len(h.b),
760 (h.number, h.desc, len(h.a), h.lena, len(h.b),
761 h.lenb))
761 h.lenb))
762
762
763 self.hunks += 1
763 self.hunks += 1
764
764
765 if self.missing:
765 if self.missing:
766 self.rej.append(h)
766 self.rej.append(h)
767 return -1
767 return -1
768
768
769 if self.exists and self.create:
769 if self.exists and self.create:
770 if self.copysource:
770 if self.copysource:
771 self.ui.warn(_("cannot create %s: destination already "
771 self.ui.warn(_("cannot create %s: destination already "
772 "exists\n") % self.fname)
772 "exists\n") % self.fname)
773 else:
773 else:
774 self.ui.warn(_("file %s already exists\n") % self.fname)
774 self.ui.warn(_("file %s already exists\n") % self.fname)
775 self.rej.append(h)
775 self.rej.append(h)
776 return -1
776 return -1
777
777
778 if isinstance(h, binhunk):
778 if isinstance(h, binhunk):
779 if self.remove:
779 if self.remove:
780 self.backend.unlink(self.fname)
780 self.backend.unlink(self.fname)
781 else:
781 else:
782 l = h.new(self.lines)
782 l = h.new(self.lines)
783 self.lines[:] = l
783 self.lines[:] = l
784 self.offset += len(l)
784 self.offset += len(l)
785 self.dirty = True
785 self.dirty = True
786 return 0
786 return 0
787
787
788 horig = h
788 horig = h
789 if (self.eolmode in ('crlf', 'lf')
789 if (self.eolmode in ('crlf', 'lf')
790 or self.eolmode == 'auto' and self.eol):
790 or self.eolmode == 'auto' and self.eol):
791 # If new eols are going to be normalized, then normalize
791 # If new eols are going to be normalized, then normalize
792 # hunk data before patching. Otherwise, preserve input
792 # hunk data before patching. Otherwise, preserve input
793 # line-endings.
793 # line-endings.
794 h = h.getnormalized()
794 h = h.getnormalized()
795
795
796 # fast case first, no offsets, no fuzz
796 # fast case first, no offsets, no fuzz
797 old, oldstart, new, newstart = h.fuzzit(0, False)
797 old, oldstart, new, newstart = h.fuzzit(0, False)
798 oldstart += self.offset
798 oldstart += self.offset
799 orig_start = oldstart
799 orig_start = oldstart
800 # if there's skew we want to emit the "(offset %d lines)" even
800 # if there's skew we want to emit the "(offset %d lines)" even
801 # when the hunk cleanly applies at start + skew, so skip the
801 # when the hunk cleanly applies at start + skew, so skip the
802 # fast case code
802 # fast case code
803 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
803 if self.skew == 0 and diffhelper.testhunk(old, self.lines, oldstart):
804 if self.remove:
804 if self.remove:
805 self.backend.unlink(self.fname)
805 self.backend.unlink(self.fname)
806 else:
806 else:
807 self.lines[oldstart:oldstart + len(old)] = new
807 self.lines[oldstart:oldstart + len(old)] = new
808 self.offset += len(new) - len(old)
808 self.offset += len(new) - len(old)
809 self.dirty = True
809 self.dirty = True
810 return 0
810 return 0
811
811
812 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
812 # ok, we couldn't match the hunk. Lets look for offsets and fuzz it
813 self.hash = {}
813 self.hash = {}
814 for x, s in enumerate(self.lines):
814 for x, s in enumerate(self.lines):
815 self.hash.setdefault(s, []).append(x)
815 self.hash.setdefault(s, []).append(x)
816
816
817 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
817 for fuzzlen in pycompat.xrange(self.ui.configint("patch", "fuzz") + 1):
818 for toponly in [True, False]:
818 for toponly in [True, False]:
819 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
819 old, oldstart, new, newstart = h.fuzzit(fuzzlen, toponly)
820 oldstart = oldstart + self.offset + self.skew
820 oldstart = oldstart + self.offset + self.skew
821 oldstart = min(oldstart, len(self.lines))
821 oldstart = min(oldstart, len(self.lines))
822 if old:
822 if old:
823 cand = self.findlines(old[0][1:], oldstart)
823 cand = self.findlines(old[0][1:], oldstart)
824 else:
824 else:
825 # Only adding lines with no or fuzzed context, just
825 # Only adding lines with no or fuzzed context, just
826 # take the skew in account
826 # take the skew in account
827 cand = [oldstart]
827 cand = [oldstart]
828
828
829 for l in cand:
829 for l in cand:
830 if not old or diffhelper.testhunk(old, self.lines, l):
830 if not old or diffhelper.testhunk(old, self.lines, l):
831 self.lines[l : l + len(old)] = new
831 self.lines[l : l + len(old)] = new
832 self.offset += len(new) - len(old)
832 self.offset += len(new) - len(old)
833 self.skew = l - orig_start
833 self.skew = l - orig_start
834 self.dirty = True
834 self.dirty = True
835 offset = l - orig_start - fuzzlen
835 offset = l - orig_start - fuzzlen
836 if fuzzlen:
836 if fuzzlen:
837 msg = _("Hunk #%d succeeded at %d "
837 msg = _("Hunk #%d succeeded at %d "
838 "with fuzz %d "
838 "with fuzz %d "
839 "(offset %d lines).\n")
839 "(offset %d lines).\n")
840 self.printfile(True)
840 self.printfile(True)
841 self.ui.warn(msg %
841 self.ui.warn(msg %
842 (h.number, l + 1, fuzzlen, offset))
842 (h.number, l + 1, fuzzlen, offset))
843 else:
843 else:
844 msg = _("Hunk #%d succeeded at %d "
844 msg = _("Hunk #%d succeeded at %d "
845 "(offset %d lines).\n")
845 "(offset %d lines).\n")
846 self.ui.note(msg % (h.number, l + 1, offset))
846 self.ui.note(msg % (h.number, l + 1, offset))
847 return fuzzlen
847 return fuzzlen
848 self.printfile(True)
848 self.printfile(True)
849 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
849 self.ui.warn(_("Hunk #%d FAILED at %d\n") % (h.number, orig_start))
850 self.rej.append(horig)
850 self.rej.append(horig)
851 return -1
851 return -1
852
852
853 def close(self):
853 def close(self):
854 if self.dirty:
854 if self.dirty:
855 self.writelines(self.fname, self.lines, self.mode)
855 self.writelines(self.fname, self.lines, self.mode)
856 self.write_rej()
856 self.write_rej()
857 return len(self.rej)
857 return len(self.rej)
858
858
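# Editorial illustration (not part of the original file) of the candidate
# ordering used by findlines()/apply() above: matching lines are tried
# closest to the expected position first, and the amount of allowed fuzz
# comes from the 'patch.fuzz' config value read in apply().
def _example_candidate_ordering():
    cand = [3, 40, 12]
    linenum = 10
    cand.sort(key=lambda x: abs(x - linenum))
    return cand   # [12, 3, 40]: the nearest candidate is tried first
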
859 class header(object):
859 class header(object):
860 """patch header
860 """patch header
861 """
861 """
862 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
862 diffgit_re = re.compile('diff --git a/(.*) b/(.*)$')
863 diff_re = re.compile('diff -r .* (.*)$')
863 diff_re = re.compile('diff -r .* (.*)$')
864 allhunks_re = re.compile('(?:index|deleted file) ')
864 allhunks_re = re.compile('(?:index|deleted file) ')
865 pretty_re = re.compile('(?:new file|deleted file) ')
865 pretty_re = re.compile('(?:new file|deleted file) ')
866 special_re = re.compile('(?:index|deleted|copy|rename|new mode) ')
866 special_re = re.compile('(?:index|deleted|copy|rename|new mode) ')
-    newfile_re = re.compile('(?:new file|copy to)')
+    newfile_re = re.compile('(?:new file|copy to|rename to)')
868
868
869 def __init__(self, header):
869 def __init__(self, header):
870 self.header = header
870 self.header = header
871 self.hunks = []
871 self.hunks = []
872
872
873 def binary(self):
873 def binary(self):
874 return any(h.startswith('index ') for h in self.header)
874 return any(h.startswith('index ') for h in self.header)
875
875
876 def pretty(self, fp):
876 def pretty(self, fp):
877 for h in self.header:
877 for h in self.header:
878 if h.startswith('index '):
878 if h.startswith('index '):
879 fp.write(_('this modifies a binary file (all or nothing)\n'))
879 fp.write(_('this modifies a binary file (all or nothing)\n'))
880 break
880 break
881 if self.pretty_re.match(h):
881 if self.pretty_re.match(h):
882 fp.write(h)
882 fp.write(h)
883 if self.binary():
883 if self.binary():
884 fp.write(_('this is a binary file\n'))
884 fp.write(_('this is a binary file\n'))
885 break
885 break
886 if h.startswith('---'):
886 if h.startswith('---'):
887 fp.write(_('%d hunks, %d lines changed\n') %
887 fp.write(_('%d hunks, %d lines changed\n') %
888 (len(self.hunks),
888 (len(self.hunks),
889 sum([max(h.added, h.removed) for h in self.hunks])))
889 sum([max(h.added, h.removed) for h in self.hunks])))
890 break
890 break
891 fp.write(h)
891 fp.write(h)
892
892
893 def write(self, fp):
893 def write(self, fp):
894 fp.write(''.join(self.header))
894 fp.write(''.join(self.header))
895
895
896 def allhunks(self):
896 def allhunks(self):
897 return any(self.allhunks_re.match(h) for h in self.header)
897 return any(self.allhunks_re.match(h) for h in self.header)
898
898
899 def files(self):
899 def files(self):
900 match = self.diffgit_re.match(self.header[0])
900 match = self.diffgit_re.match(self.header[0])
901 if match:
901 if match:
902 fromfile, tofile = match.groups()
902 fromfile, tofile = match.groups()
903 if fromfile == tofile:
903 if fromfile == tofile:
904 return [fromfile]
904 return [fromfile]
905 return [fromfile, tofile]
905 return [fromfile, tofile]
906 else:
906 else:
907 return self.diff_re.match(self.header[0]).groups()
907 return self.diff_re.match(self.header[0]).groups()
908
908
909 def filename(self):
909 def filename(self):
910 return self.files()[-1]
910 return self.files()[-1]
911
911
912 def __repr__(self):
912 def __repr__(self):
913 return '<header %s>' % (' '.join(map(repr, self.files())))
913 return '<header %s>' % (' '.join(map(repr, self.files())))
914
914
915 def isnewfile(self):
915 def isnewfile(self):
916 return any(self.newfile_re.match(h) for h in self.header)
916 return any(self.newfile_re.match(h) for h in self.header)
917
917
918 def special(self):
918 def special(self):
919 # Special files are shown only at the header level and not at the hunk
919 # Special files are shown only at the header level and not at the hunk
920 # level for example a file that has been deleted is a special file.
920 # level for example a file that has been deleted is a special file.
921 # The user cannot change the content of the operation, in the case of
921 # The user cannot change the content of the operation, in the case of
922 # the deleted file he has to take the deletion or not take it, he
922 # the deleted file he has to take the deletion or not take it, he
923 # cannot take some of it.
923 # cannot take some of it.
924 # Newly added files are special if they are empty, they are not special
924 # Newly added files are special if they are empty, they are not special
925 # if they have some content as we want to be able to change it
925 # if they have some content as we want to be able to change it
926 nocontent = len(self.header) == 2
926 nocontent = len(self.header) == 2
927 emptynewfile = self.isnewfile() and nocontent
927 emptynewfile = self.isnewfile() and nocontent
928 return (emptynewfile
928 return (emptynewfile
929 or any(self.special_re.match(h) for h in self.header))
929 or any(self.special_re.match(h) for h in self.header))
930
930
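# Editorial sketch (not part of the original file), tied to the newfile_re
# change above: with 'rename to' added to the pattern, a rename header is
# now reported by isnewfile(); special() already matched renames via
# special_re. The byte-string header lines are a made-up minimal example.
def _example_rename_header():
    h = header([
        b'diff --git a/old.txt b/new.txt\n',
        b'rename from old.txt\n',
        b'rename to new.txt\n',
    ])
    return h.isnewfile(), h.special()   # (True, True)
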
931 class recordhunk(object):
931 class recordhunk(object):
932 """patch hunk
932 """patch hunk
933
933
934 XXX shouldn't we merge this with the other hunk class?
934 XXX shouldn't we merge this with the other hunk class?
935 """
935 """
936
936
937 def __init__(self, header, fromline, toline, proc, before, hunk, after,
937 def __init__(self, header, fromline, toline, proc, before, hunk, after,
938 maxcontext=None):
938 maxcontext=None):
939 def trimcontext(lines, reverse=False):
939 def trimcontext(lines, reverse=False):
940 if maxcontext is not None:
940 if maxcontext is not None:
941 delta = len(lines) - maxcontext
941 delta = len(lines) - maxcontext
942 if delta > 0:
942 if delta > 0:
943 if reverse:
943 if reverse:
944 return delta, lines[delta:]
944 return delta, lines[delta:]
945 else:
945 else:
946 return delta, lines[:maxcontext]
946 return delta, lines[:maxcontext]
947 return 0, lines
947 return 0, lines
948
948
949 self.header = header
949 self.header = header
950 trimedbefore, self.before = trimcontext(before, True)
950 trimedbefore, self.before = trimcontext(before, True)
951 self.fromline = fromline + trimedbefore
951 self.fromline = fromline + trimedbefore
952 self.toline = toline + trimedbefore
952 self.toline = toline + trimedbefore
953 _trimedafter, self.after = trimcontext(after, False)
953 _trimedafter, self.after = trimcontext(after, False)
954 self.proc = proc
954 self.proc = proc
955 self.hunk = hunk
955 self.hunk = hunk
956 self.added, self.removed = self.countchanges(self.hunk)
956 self.added, self.removed = self.countchanges(self.hunk)
957
957
958 def __eq__(self, v):
958 def __eq__(self, v):
959 if not isinstance(v, recordhunk):
959 if not isinstance(v, recordhunk):
960 return False
960 return False
961
961
962 return ((v.hunk == self.hunk) and
962 return ((v.hunk == self.hunk) and
963 (v.proc == self.proc) and
963 (v.proc == self.proc) and
964 (self.fromline == v.fromline) and
964 (self.fromline == v.fromline) and
965 (self.header.files() == v.header.files()))
965 (self.header.files() == v.header.files()))
966
966
967 def __hash__(self):
967 def __hash__(self):
968 return hash((tuple(self.hunk),
968 return hash((tuple(self.hunk),
969 tuple(self.header.files()),
969 tuple(self.header.files()),
970 self.fromline,
970 self.fromline,
971 self.proc))
971 self.proc))
972
972
973 def countchanges(self, hunk):
973 def countchanges(self, hunk):
974 """hunk -> (n+,n-)"""
974 """hunk -> (n+,n-)"""
975 add = len([h for h in hunk if h.startswith('+')])
975 add = len([h for h in hunk if h.startswith('+')])
976 rem = len([h for h in hunk if h.startswith('-')])
976 rem = len([h for h in hunk if h.startswith('-')])
977 return add, rem
977 return add, rem
978
978
979 def reversehunk(self):
979 def reversehunk(self):
980 """return another recordhunk which is the reverse of the hunk
980 """return another recordhunk which is the reverse of the hunk
981
981
982 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
982 If this hunk is diff(A, B), the returned hunk is diff(B, A). To do
983 that, swap fromline/toline and +/- signs while keep other things
983 that, swap fromline/toline and +/- signs while keep other things
984 unchanged.
984 unchanged.
985 """
985 """
986 m = {'+': '-', '-': '+', '\\': '\\'}
986 m = {'+': '-', '-': '+', '\\': '\\'}
987 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
987 hunk = ['%s%s' % (m[l[0:1]], l[1:]) for l in self.hunk]
988 return recordhunk(self.header, self.toline, self.fromline, self.proc,
988 return recordhunk(self.header, self.toline, self.fromline, self.proc,
989 self.before, hunk, self.after)
989 self.before, hunk, self.after)
990
990
991 def write(self, fp):
991 def write(self, fp):
992 delta = len(self.before) + len(self.after)
992 delta = len(self.before) + len(self.after)
993 if self.after and self.after[-1] == '\\ No newline at end of file\n':
993 if self.after and self.after[-1] == '\\ No newline at end of file\n':
994 delta -= 1
994 delta -= 1
995 fromlen = delta + self.removed
995 fromlen = delta + self.removed
996 tolen = delta + self.added
996 tolen = delta + self.added
997 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
997 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
998 (self.fromline, fromlen, self.toline, tolen,
998 (self.fromline, fromlen, self.toline, tolen,
999 self.proc and (' ' + self.proc)))
999 self.proc and (' ' + self.proc)))
1000 fp.write(''.join(self.before + self.hunk + self.after))
1000 fp.write(''.join(self.before + self.hunk + self.after))
1001
1001
1002 pretty = write
1002 pretty = write
1003
1003
1004 def filename(self):
1004 def filename(self):
1005 return self.header.filename()
1005 return self.header.filename()
1006
1006
1007 def __repr__(self):
1007 def __repr__(self):
1008 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1008 return '<hunk %r@%d>' % (self.filename(), self.fromline)
1009
1009
1010 def getmessages():
1010 def getmessages():
1011 return {
1011 return {
1012 'multiple': {
1012 'multiple': {
1013 'apply': _("apply change %d/%d to '%s'?"),
1013 'apply': _("apply change %d/%d to '%s'?"),
1014 'discard': _("discard change %d/%d to '%s'?"),
1014 'discard': _("discard change %d/%d to '%s'?"),
1015 'keep': _("keep change %d/%d to '%s'?"),
1015 'keep': _("keep change %d/%d to '%s'?"),
1016 'record': _("record change %d/%d to '%s'?"),
1016 'record': _("record change %d/%d to '%s'?"),
1017 },
1017 },
1018 'single': {
1018 'single': {
1019 'apply': _("apply this change to '%s'?"),
1019 'apply': _("apply this change to '%s'?"),
1020 'discard': _("discard this change to '%s'?"),
1020 'discard': _("discard this change to '%s'?"),
1021 'keep': _("keep this change to '%s'?"),
1021 'keep': _("keep this change to '%s'?"),
1022 'record': _("record this change to '%s'?"),
1022 'record': _("record this change to '%s'?"),
1023 },
1023 },
1024 'help': {
1024 'help': {
1025 'apply': _('[Ynesfdaq?]'
1025 'apply': _('[Ynesfdaq?]'
1026 '$$ &Yes, apply this change'
1026 '$$ &Yes, apply this change'
1027 '$$ &No, skip this change'
1027 '$$ &No, skip this change'
1028 '$$ &Edit this change manually'
1028 '$$ &Edit this change manually'
1029 '$$ &Skip remaining changes to this file'
1029 '$$ &Skip remaining changes to this file'
1030 '$$ Apply remaining changes to this &file'
1030 '$$ Apply remaining changes to this &file'
1031 '$$ &Done, skip remaining changes and files'
1031 '$$ &Done, skip remaining changes and files'
1032 '$$ Apply &all changes to all remaining files'
1032 '$$ Apply &all changes to all remaining files'
1033 '$$ &Quit, applying no changes'
1033 '$$ &Quit, applying no changes'
1034 '$$ &? (display help)'),
1034 '$$ &? (display help)'),
1035 'discard': _('[Ynesfdaq?]'
1035 'discard': _('[Ynesfdaq?]'
1036 '$$ &Yes, discard this change'
1036 '$$ &Yes, discard this change'
1037 '$$ &No, skip this change'
1037 '$$ &No, skip this change'
1038 '$$ &Edit this change manually'
1038 '$$ &Edit this change manually'
1039 '$$ &Skip remaining changes to this file'
1039 '$$ &Skip remaining changes to this file'
1040 '$$ Discard remaining changes to this &file'
1040 '$$ Discard remaining changes to this &file'
1041 '$$ &Done, skip remaining changes and files'
1041 '$$ &Done, skip remaining changes and files'
1042 '$$ Discard &all changes to all remaining files'
1042 '$$ Discard &all changes to all remaining files'
1043 '$$ &Quit, discarding no changes'
1043 '$$ &Quit, discarding no changes'
1044 '$$ &? (display help)'),
1044 '$$ &? (display help)'),
1045 'keep': _('[Ynesfdaq?]'
1045 'keep': _('[Ynesfdaq?]'
1046 '$$ &Yes, keep this change'
1046 '$$ &Yes, keep this change'
1047 '$$ &No, skip this change'
1047 '$$ &No, skip this change'
1048 '$$ &Edit this change manually'
1048 '$$ &Edit this change manually'
1049 '$$ &Skip remaining changes to this file'
1049 '$$ &Skip remaining changes to this file'
1050 '$$ Keep remaining changes to this &file'
1050 '$$ Keep remaining changes to this &file'
1051 '$$ &Done, skip remaining changes and files'
1051 '$$ &Done, skip remaining changes and files'
1052 '$$ Keep &all changes to all remaining files'
1052 '$$ Keep &all changes to all remaining files'
1053 '$$ &Quit, keeping all changes'
1053 '$$ &Quit, keeping all changes'
1054 '$$ &? (display help)'),
1054 '$$ &? (display help)'),
1055 'record': _('[Ynesfdaq?]'
1055 'record': _('[Ynesfdaq?]'
1056 '$$ &Yes, record this change'
1056 '$$ &Yes, record this change'
1057 '$$ &No, skip this change'
1057 '$$ &No, skip this change'
1058 '$$ &Edit this change manually'
1058 '$$ &Edit this change manually'
1059 '$$ &Skip remaining changes to this file'
1059 '$$ &Skip remaining changes to this file'
1060 '$$ Record remaining changes to this &file'
1060 '$$ Record remaining changes to this &file'
1061 '$$ &Done, skip remaining changes and files'
1061 '$$ &Done, skip remaining changes and files'
1062 '$$ Record &all changes to all remaining files'
1062 '$$ Record &all changes to all remaining files'
1063 '$$ &Quit, recording no changes'
1063 '$$ &Quit, recording no changes'
1064 '$$ &? (display help)'),
1064 '$$ &? (display help)'),
1065 }
1065 }
1066 }
1066 }
1067
1067
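# Illustrative note (not part of the original module): the 'help'
# strings above follow the ui.promptchoice()/ui.extractchoices()
# convention -- the text before the first '$$' is what gets printed,
# each following '$$' segment is one choice, and '&' marks the key that
# selects it. promptchoice() returns the 0-based index of the chosen
# segment, which is why filterpatch() below compares the result against
# integers (0 == yes, 1 == no, ..., 7 == quit, 8 == '?'). A hypothetical
# two-choice prompt using the same convention:
#
#   r = ui.promptchoice(_("apply? [Yn] $$ &Yes $$ &No"))
#   if r == 0:
#       ...  # user picked Yes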
1068 def filterpatch(ui, headers, match, operation=None):
1068 def filterpatch(ui, headers, match, operation=None):
1069 """Interactively filter patch chunks into applied-only chunks"""
1069 """Interactively filter patch chunks into applied-only chunks"""
1070 messages = getmessages()
1070 messages = getmessages()
1071
1071
1072 if operation is None:
1072 if operation is None:
1073 operation = 'record'
1073 operation = 'record'
1074
1074
1075 def prompt(skipfile, skipall, query, chunk):
1075 def prompt(skipfile, skipall, query, chunk):
1076 """prompt query, and process base inputs
1076 """prompt query, and process base inputs
1077
1077
1078 - y/n for the rest of the file
1079 - y/n for all remaining files
1080 - ? (help)
1080 - ? (help)
1081 - q (quit)
1081 - q (quit)
1082
1082
1083 Return True/False and possibly updated skipfile and skipall.
1083 Return True/False and possibly updated skipfile and skipall.
1084 """
1084 """
1085 newpatches = None
1085 newpatches = None
1086 if skipall is not None:
1086 if skipall is not None:
1087 return skipall, skipfile, skipall, newpatches
1087 return skipall, skipfile, skipall, newpatches
1088 if skipfile is not None:
1088 if skipfile is not None:
1089 return skipfile, skipfile, skipall, newpatches
1089 return skipfile, skipfile, skipall, newpatches
1090 while True:
1090 while True:
1091 resps = messages['help'][operation]
1091 resps = messages['help'][operation]
1092 # IMPORTANT: keep the last line of this prompt short (<40 English
1093 # chars is a good target) because of issue6158.
1094 r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps))
1094 r = ui.promptchoice("%s\n(enter ? for help) %s" % (query, resps))
1095 ui.write("\n")
1095 ui.write("\n")
1096 if r == 8: # ?
1096 if r == 8: # ?
1097 for c, t in ui.extractchoices(resps)[1]:
1097 for c, t in ui.extractchoices(resps)[1]:
1098 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1098 ui.write('%s - %s\n' % (c, encoding.lower(t)))
1099 continue
1099 continue
1100 elif r == 0: # yes
1100 elif r == 0: # yes
1101 ret = True
1101 ret = True
1102 elif r == 1: # no
1102 elif r == 1: # no
1103 ret = False
1103 ret = False
1104 elif r == 2: # Edit patch
1104 elif r == 2: # Edit patch
1105 if chunk is None:
1105 if chunk is None:
1106 ui.write(_('cannot edit patch for whole file'))
1106 ui.write(_('cannot edit patch for whole file'))
1107 ui.write("\n")
1107 ui.write("\n")
1108 continue
1108 continue
1109 if chunk.header.binary():
1109 if chunk.header.binary():
1110 ui.write(_('cannot edit patch for binary file'))
1110 ui.write(_('cannot edit patch for binary file'))
1111 ui.write("\n")
1111 ui.write("\n")
1112 continue
1112 continue
1113 # Patch comment based on the Git one (based on comment at end of
1113 # Patch comment based on the Git one (based on comment at end of
1114 # https://mercurial-scm.org/wiki/RecordExtension)
1114 # https://mercurial-scm.org/wiki/RecordExtension)
1115 phelp = '---' + _("""
1115 phelp = '---' + _("""
1116 To remove '-' lines, make them ' ' lines (context).
1116 To remove '-' lines, make them ' ' lines (context).
1117 To remove '+' lines, delete them.
1117 To remove '+' lines, delete them.
1118 Lines starting with # will be removed from the patch.
1118 Lines starting with # will be removed from the patch.
1119
1119
1120 If the patch applies cleanly, the edited hunk will immediately be
1120 If the patch applies cleanly, the edited hunk will immediately be
1121 added to the record list. If it does not apply cleanly, a rejects
1121 added to the record list. If it does not apply cleanly, a rejects
1122 file will be generated: you can use that when you try again. If
1122 file will be generated: you can use that when you try again. If
1123 all lines of the hunk are removed, then the edit is aborted and
1123 all lines of the hunk are removed, then the edit is aborted and
1124 the hunk is left unchanged.
1124 the hunk is left unchanged.
1125 """)
1125 """)
1126 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1126 (patchfd, patchfn) = pycompat.mkstemp(prefix="hg-editor-",
1127 suffix=".diff")
1127 suffix=".diff")
1128 ncpatchfp = None
1128 ncpatchfp = None
1129 try:
1129 try:
1130 # Write the initial patch
1130 # Write the initial patch
1131 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1131 f = util.nativeeolwriter(os.fdopen(patchfd, r'wb'))
1132 chunk.header.write(f)
1132 chunk.header.write(f)
1133 chunk.write(f)
1133 chunk.write(f)
1134 f.write(''.join(['# ' + i + '\n'
1134 f.write(''.join(['# ' + i + '\n'
1135 for i in phelp.splitlines()]))
1135 for i in phelp.splitlines()]))
1136 f.close()
1136 f.close()
1137 # Start the editor and wait for it to complete
1137 # Start the editor and wait for it to complete
1138 editor = ui.geteditor()
1138 editor = ui.geteditor()
1139 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1139 ret = ui.system("%s \"%s\"" % (editor, patchfn),
1140 environ={'HGUSER': ui.username()},
1140 environ={'HGUSER': ui.username()},
1141 blockedtag='filterpatch')
1141 blockedtag='filterpatch')
1142 if ret != 0:
1142 if ret != 0:
1143 ui.warn(_("editor exited with exit code %d\n") % ret)
1143 ui.warn(_("editor exited with exit code %d\n") % ret)
1144 continue
1144 continue
1145 # Remove comment lines
1145 # Remove comment lines
1146 patchfp = open(patchfn, r'rb')
1146 patchfp = open(patchfn, r'rb')
1147 ncpatchfp = stringio()
1147 ncpatchfp = stringio()
1148 for line in util.iterfile(patchfp):
1148 for line in util.iterfile(patchfp):
1149 line = util.fromnativeeol(line)
1149 line = util.fromnativeeol(line)
1150 if not line.startswith('#'):
1150 if not line.startswith('#'):
1151 ncpatchfp.write(line)
1151 ncpatchfp.write(line)
1152 patchfp.close()
1152 patchfp.close()
1153 ncpatchfp.seek(0)
1153 ncpatchfp.seek(0)
1154 newpatches = parsepatch(ncpatchfp)
1154 newpatches = parsepatch(ncpatchfp)
1155 finally:
1155 finally:
1156 os.unlink(patchfn)
1156 os.unlink(patchfn)
1157 del ncpatchfp
1157 del ncpatchfp
1158 # Signal that the chunk shouldn't be applied as-is, but
1158 # Signal that the chunk shouldn't be applied as-is, but
1159 # provide the new patch to be used instead.
1159 # provide the new patch to be used instead.
1160 ret = False
1160 ret = False
1161 elif r == 3: # Skip
1161 elif r == 3: # Skip
1162 ret = skipfile = False
1162 ret = skipfile = False
1163 elif r == 4: # file (Record remaining)
1163 elif r == 4: # file (Record remaining)
1164 ret = skipfile = True
1164 ret = skipfile = True
1165 elif r == 5: # done, skip remaining
1165 elif r == 5: # done, skip remaining
1166 ret = skipall = False
1166 ret = skipall = False
1167 elif r == 6: # all
1167 elif r == 6: # all
1168 ret = skipall = True
1168 ret = skipall = True
1169 elif r == 7: # quit
1169 elif r == 7: # quit
1170 raise error.Abort(_('user quit'))
1170 raise error.Abort(_('user quit'))
1171 return ret, skipfile, skipall, newpatches
1171 return ret, skipfile, skipall, newpatches
1172
1172
1173 seen = set()
1173 seen = set()
1174 applied = {} # 'filename' -> [] of chunks
1174 applied = {} # 'filename' -> [] of chunks
1175 skipfile, skipall = None, None
1175 skipfile, skipall = None, None
1176 pos, total = 1, sum(len(h.hunks) for h in headers)
1176 pos, total = 1, sum(len(h.hunks) for h in headers)
1177 for h in headers:
1177 for h in headers:
1178 pos += len(h.hunks)
1178 pos += len(h.hunks)
1179 skipfile = None
1179 skipfile = None
1180 fixoffset = 0
1180 fixoffset = 0
1181 hdr = ''.join(h.header)
1181 hdr = ''.join(h.header)
1182 if hdr in seen:
1182 if hdr in seen:
1183 continue
1183 continue
1184 seen.add(hdr)
1184 seen.add(hdr)
1185 if skipall is None:
1185 if skipall is None:
1186 h.pretty(ui)
1186 h.pretty(ui)
1187 files = h.files()
1187 files = h.files()
1188 msg = (_('examine changes to %s?') %
1188 msg = (_('examine changes to %s?') %
1189 _(' and ').join("'%s'" % f for f in files))
1189 _(' and ').join("'%s'" % f for f in files))
1190 if all(match.exact(f) for f in files):
1190 if all(match.exact(f) for f in files):
1191 r, skipall, np = True, None, None
1191 r, skipall, np = True, None, None
1192 else:
1192 else:
1193 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1193 r, skipfile, skipall, np = prompt(skipfile, skipall, msg, None)
1194 if not r:
1194 if not r:
1195 continue
1195 continue
1196 applied[h.filename()] = [h]
1196 applied[h.filename()] = [h]
1197 if h.allhunks():
1197 if h.allhunks():
1198 applied[h.filename()] += h.hunks
1198 applied[h.filename()] += h.hunks
1199 continue
1199 continue
1200 for i, chunk in enumerate(h.hunks):
1200 for i, chunk in enumerate(h.hunks):
1201 if skipfile is None and skipall is None:
1201 if skipfile is None and skipall is None:
1202 chunk.pretty(ui)
1202 chunk.pretty(ui)
1203 if total == 1:
1203 if total == 1:
1204 msg = messages['single'][operation] % chunk.filename()
1204 msg = messages['single'][operation] % chunk.filename()
1205 else:
1205 else:
1206 idx = pos - len(h.hunks) + i
1206 idx = pos - len(h.hunks) + i
1207 msg = messages['multiple'][operation] % (idx, total,
1207 msg = messages['multiple'][operation] % (idx, total,
1208 chunk.filename())
1208 chunk.filename())
1209 r, skipfile, skipall, newpatches = prompt(skipfile,
1209 r, skipfile, skipall, newpatches = prompt(skipfile,
1210 skipall, msg, chunk)
1210 skipall, msg, chunk)
1211 if r:
1211 if r:
1212 if fixoffset:
1212 if fixoffset:
1213 chunk = copy.copy(chunk)
1213 chunk = copy.copy(chunk)
1214 chunk.toline += fixoffset
1214 chunk.toline += fixoffset
1215 applied[chunk.filename()].append(chunk)
1215 applied[chunk.filename()].append(chunk)
1216 elif newpatches is not None:
1216 elif newpatches is not None:
1217 for newpatch in newpatches:
1217 for newpatch in newpatches:
1218 for newhunk in newpatch.hunks:
1218 for newhunk in newpatch.hunks:
1219 if fixoffset:
1219 if fixoffset:
1220 newhunk.toline += fixoffset
1220 newhunk.toline += fixoffset
1221 applied[newhunk.filename()].append(newhunk)
1221 applied[newhunk.filename()].append(newhunk)
1222 else:
1222 else:
1223 fixoffset += chunk.removed - chunk.added
1223 fixoffset += chunk.removed - chunk.added
1224 return (sum([h for h in applied.itervalues()
1224 return (sum([h for h in applied.itervalues()
1225 if h[0].special() or len(h) > 1], []), {})
1225 if h[0].special() or len(h) > 1], []), {})
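# Illustrative note (not part of the original module): the 'fixoffset'
# counter in filterpatch() above tracks how far later hunks of the same
# file shift in the target file when an earlier hunk is skipped. A
# skipped hunk that would have removed 3 lines and added 1 leaves the
# file 2 lines longer than the patch expects, so:
#
#   fixoffset = 0
#   fixoffset += 3 - 1          # skipped hunk: removed=3, added=1
#   chunk.toline += fixoffset   # applied to every later accepted hunk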
1226 class hunk(object):
1226 class hunk(object):
1227 def __init__(self, desc, num, lr, context):
1227 def __init__(self, desc, num, lr, context):
1228 self.number = num
1228 self.number = num
1229 self.desc = desc
1229 self.desc = desc
1230 self.hunk = [desc]
1230 self.hunk = [desc]
1231 self.a = []
1231 self.a = []
1232 self.b = []
1232 self.b = []
1233 self.starta = self.lena = None
1233 self.starta = self.lena = None
1234 self.startb = self.lenb = None
1234 self.startb = self.lenb = None
1235 if lr is not None:
1235 if lr is not None:
1236 if context:
1236 if context:
1237 self.read_context_hunk(lr)
1237 self.read_context_hunk(lr)
1238 else:
1238 else:
1239 self.read_unified_hunk(lr)
1239 self.read_unified_hunk(lr)
1240
1240
1241 def getnormalized(self):
1241 def getnormalized(self):
1242 """Return a copy with line endings normalized to LF."""
1242 """Return a copy with line endings normalized to LF."""
1243
1243
1244 def normalize(lines):
1244 def normalize(lines):
1245 nlines = []
1245 nlines = []
1246 for line in lines:
1246 for line in lines:
1247 if line.endswith('\r\n'):
1247 if line.endswith('\r\n'):
1248 line = line[:-2] + '\n'
1248 line = line[:-2] + '\n'
1249 nlines.append(line)
1249 nlines.append(line)
1250 return nlines
1250 return nlines
1251
1251
1252 # Dummy object; it is rebuilt manually
1253 nh = hunk(self.desc, self.number, None, None)
1253 nh = hunk(self.desc, self.number, None, None)
1254 nh.number = self.number
1254 nh.number = self.number
1255 nh.desc = self.desc
1255 nh.desc = self.desc
1256 nh.hunk = self.hunk
1256 nh.hunk = self.hunk
1257 nh.a = normalize(self.a)
1257 nh.a = normalize(self.a)
1258 nh.b = normalize(self.b)
1258 nh.b = normalize(self.b)
1259 nh.starta = self.starta
1259 nh.starta = self.starta
1260 nh.startb = self.startb
1260 nh.startb = self.startb
1261 nh.lena = self.lena
1261 nh.lena = self.lena
1262 nh.lenb = self.lenb
1262 nh.lenb = self.lenb
1263 return nh
1263 return nh
1264
1264
1265 def read_unified_hunk(self, lr):
1265 def read_unified_hunk(self, lr):
1266 m = unidesc.match(self.desc)
1266 m = unidesc.match(self.desc)
1267 if not m:
1267 if not m:
1268 raise PatchError(_("bad hunk #%d") % self.number)
1268 raise PatchError(_("bad hunk #%d") % self.number)
1269 self.starta, self.lena, self.startb, self.lenb = m.groups()
1269 self.starta, self.lena, self.startb, self.lenb = m.groups()
1270 if self.lena is None:
1270 if self.lena is None:
1271 self.lena = 1
1271 self.lena = 1
1272 else:
1272 else:
1273 self.lena = int(self.lena)
1273 self.lena = int(self.lena)
1274 if self.lenb is None:
1274 if self.lenb is None:
1275 self.lenb = 1
1275 self.lenb = 1
1276 else:
1276 else:
1277 self.lenb = int(self.lenb)
1277 self.lenb = int(self.lenb)
1278 self.starta = int(self.starta)
1278 self.starta = int(self.starta)
1279 self.startb = int(self.startb)
1279 self.startb = int(self.startb)
1280 try:
1280 try:
1281 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1281 diffhelper.addlines(lr, self.hunk, self.lena, self.lenb,
1282 self.a, self.b)
1282 self.a, self.b)
1283 except error.ParseError as e:
1283 except error.ParseError as e:
1284 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1284 raise PatchError(_("bad hunk #%d: %s") % (self.number, e))
1285 # if we hit EOF before finishing out the hunk, the last line will
1286 # be zero length. Let's try to fix it up.
1287 while len(self.hunk[-1]) == 0:
1287 while len(self.hunk[-1]) == 0:
1288 del self.hunk[-1]
1288 del self.hunk[-1]
1289 del self.a[-1]
1289 del self.a[-1]
1290 del self.b[-1]
1290 del self.b[-1]
1291 self.lena -= 1
1291 self.lena -= 1
1292 self.lenb -= 1
1292 self.lenb -= 1
1293 self._fixnewline(lr)
1293 self._fixnewline(lr)
1294
1294
1295 def read_context_hunk(self, lr):
1295 def read_context_hunk(self, lr):
1296 self.desc = lr.readline()
1296 self.desc = lr.readline()
1297 m = contextdesc.match(self.desc)
1297 m = contextdesc.match(self.desc)
1298 if not m:
1298 if not m:
1299 raise PatchError(_("bad hunk #%d") % self.number)
1299 raise PatchError(_("bad hunk #%d") % self.number)
1300 self.starta, aend = m.groups()
1300 self.starta, aend = m.groups()
1301 self.starta = int(self.starta)
1301 self.starta = int(self.starta)
1302 if aend is None:
1302 if aend is None:
1303 aend = self.starta
1303 aend = self.starta
1304 self.lena = int(aend) - self.starta
1304 self.lena = int(aend) - self.starta
1305 if self.starta:
1305 if self.starta:
1306 self.lena += 1
1306 self.lena += 1
1307 for x in pycompat.xrange(self.lena):
1307 for x in pycompat.xrange(self.lena):
1308 l = lr.readline()
1308 l = lr.readline()
1309 if l.startswith('---'):
1309 if l.startswith('---'):
1310 # lines addition, old block is empty
1310 # lines addition, old block is empty
1311 lr.push(l)
1311 lr.push(l)
1312 break
1312 break
1313 s = l[2:]
1313 s = l[2:]
1314 if l.startswith('- ') or l.startswith('! '):
1314 if l.startswith('- ') or l.startswith('! '):
1315 u = '-' + s
1315 u = '-' + s
1316 elif l.startswith(' '):
1316 elif l.startswith(' '):
1317 u = ' ' + s
1317 u = ' ' + s
1318 else:
1318 else:
1319 raise PatchError(_("bad hunk #%d old text line %d") %
1319 raise PatchError(_("bad hunk #%d old text line %d") %
1320 (self.number, x))
1320 (self.number, x))
1321 self.a.append(u)
1321 self.a.append(u)
1322 self.hunk.append(u)
1322 self.hunk.append(u)
1323
1323
1324 l = lr.readline()
1324 l = lr.readline()
1325 if l.startswith(br'\ '):
1325 if l.startswith(br'\ '):
1326 s = self.a[-1][:-1]
1326 s = self.a[-1][:-1]
1327 self.a[-1] = s
1327 self.a[-1] = s
1328 self.hunk[-1] = s
1328 self.hunk[-1] = s
1329 l = lr.readline()
1329 l = lr.readline()
1330 m = contextdesc.match(l)
1330 m = contextdesc.match(l)
1331 if not m:
1331 if not m:
1332 raise PatchError(_("bad hunk #%d") % self.number)
1332 raise PatchError(_("bad hunk #%d") % self.number)
1333 self.startb, bend = m.groups()
1333 self.startb, bend = m.groups()
1334 self.startb = int(self.startb)
1334 self.startb = int(self.startb)
1335 if bend is None:
1335 if bend is None:
1336 bend = self.startb
1336 bend = self.startb
1337 self.lenb = int(bend) - self.startb
1337 self.lenb = int(bend) - self.startb
1338 if self.startb:
1338 if self.startb:
1339 self.lenb += 1
1339 self.lenb += 1
1340 hunki = 1
1340 hunki = 1
1341 for x in pycompat.xrange(self.lenb):
1341 for x in pycompat.xrange(self.lenb):
1342 l = lr.readline()
1342 l = lr.readline()
1343 if l.startswith(br'\ '):
1343 if l.startswith(br'\ '):
1344 # XXX: the only way to hit this is with an invalid line range.
1345 # The no-eol marker is not counted in the line range, but some
1346 # diff(1) implementations may behave differently.
1347 s = self.b[-1][:-1]
1347 s = self.b[-1][:-1]
1348 self.b[-1] = s
1348 self.b[-1] = s
1349 self.hunk[hunki - 1] = s
1349 self.hunk[hunki - 1] = s
1350 continue
1350 continue
1351 if not l:
1351 if not l:
1352 # line deletions, new block is empty and we hit EOF
1352 # line deletions, new block is empty and we hit EOF
1353 lr.push(l)
1353 lr.push(l)
1354 break
1354 break
1355 s = l[2:]
1355 s = l[2:]
1356 if l.startswith('+ ') or l.startswith('! '):
1356 if l.startswith('+ ') or l.startswith('! '):
1357 u = '+' + s
1357 u = '+' + s
1358 elif l.startswith(' '):
1358 elif l.startswith(' '):
1359 u = ' ' + s
1359 u = ' ' + s
1360 elif len(self.b) == 0:
1360 elif len(self.b) == 0:
1361 # line deletions, new block is empty
1361 # line deletions, new block is empty
1362 lr.push(l)
1362 lr.push(l)
1363 break
1363 break
1364 else:
1364 else:
1365 raise PatchError(_("bad hunk #%d old text line %d") %
1365 raise PatchError(_("bad hunk #%d old text line %d") %
1366 (self.number, x))
1366 (self.number, x))
1367 self.b.append(s)
1367 self.b.append(s)
1368 while True:
1368 while True:
1369 if hunki >= len(self.hunk):
1369 if hunki >= len(self.hunk):
1370 h = ""
1370 h = ""
1371 else:
1371 else:
1372 h = self.hunk[hunki]
1372 h = self.hunk[hunki]
1373 hunki += 1
1373 hunki += 1
1374 if h == u:
1374 if h == u:
1375 break
1375 break
1376 elif h.startswith('-'):
1376 elif h.startswith('-'):
1377 continue
1377 continue
1378 else:
1378 else:
1379 self.hunk.insert(hunki - 1, u)
1379 self.hunk.insert(hunki - 1, u)
1380 break
1380 break
1381
1381
1382 if not self.a:
1382 if not self.a:
1383 # this happens when lines were only added to the hunk
1383 # this happens when lines were only added to the hunk
1384 for x in self.hunk:
1384 for x in self.hunk:
1385 if x.startswith('-') or x.startswith(' '):
1385 if x.startswith('-') or x.startswith(' '):
1386 self.a.append(x)
1386 self.a.append(x)
1387 if not self.b:
1387 if not self.b:
1388 # this happens when lines were only deleted from the hunk
1388 # this happens when lines were only deleted from the hunk
1389 for x in self.hunk:
1389 for x in self.hunk:
1390 if x.startswith('+') or x.startswith(' '):
1390 if x.startswith('+') or x.startswith(' '):
1391 self.b.append(x[1:])
1391 self.b.append(x[1:])
1392 # @@ -start,len +start,len @@
1392 # @@ -start,len +start,len @@
1393 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1393 self.desc = "@@ -%d,%d +%d,%d @@\n" % (self.starta, self.lena,
1394 self.startb, self.lenb)
1394 self.startb, self.lenb)
1395 self.hunk[0] = self.desc
1395 self.hunk[0] = self.desc
1396 self._fixnewline(lr)
1396 self._fixnewline(lr)
1397
1397
1398 def _fixnewline(self, lr):
1398 def _fixnewline(self, lr):
1399 l = lr.readline()
1399 l = lr.readline()
1400 if l.startswith(br'\ '):
1400 if l.startswith(br'\ '):
1401 diffhelper.fixnewline(self.hunk, self.a, self.b)
1401 diffhelper.fixnewline(self.hunk, self.a, self.b)
1402 else:
1402 else:
1403 lr.push(l)
1403 lr.push(l)
1404
1404
1405 def complete(self):
1405 def complete(self):
1406 return len(self.a) == self.lena and len(self.b) == self.lenb
1406 return len(self.a) == self.lena and len(self.b) == self.lenb
1407
1407
1408 def _fuzzit(self, old, new, fuzz, toponly):
1408 def _fuzzit(self, old, new, fuzz, toponly):
1409 # this removes up to 'fuzz' context lines from the top and bottom of the
1410 # 'old' and 'new' line lists. It checks the hunk to make sure only context
1411 # lines are removed, and then returns the shortened lists plus the top offset.
1412 fuzz = min(fuzz, len(old))
1412 fuzz = min(fuzz, len(old))
1413 if fuzz:
1413 if fuzz:
1414 top = 0
1414 top = 0
1415 bot = 0
1415 bot = 0
1416 hlen = len(self.hunk)
1416 hlen = len(self.hunk)
1417 for x in pycompat.xrange(hlen - 1):
1417 for x in pycompat.xrange(hlen - 1):
1418 # the hunk starts with the @@ line, so use x+1
1418 # the hunk starts with the @@ line, so use x+1
1419 if self.hunk[x + 1].startswith(' '):
1419 if self.hunk[x + 1].startswith(' '):
1420 top += 1
1420 top += 1
1421 else:
1421 else:
1422 break
1422 break
1423 if not toponly:
1423 if not toponly:
1424 for x in pycompat.xrange(hlen - 1):
1424 for x in pycompat.xrange(hlen - 1):
1425 if self.hunk[hlen - bot - 1].startswith(' '):
1425 if self.hunk[hlen - bot - 1].startswith(' '):
1426 bot += 1
1426 bot += 1
1427 else:
1427 else:
1428 break
1428 break
1429
1429
1430 bot = min(fuzz, bot)
1430 bot = min(fuzz, bot)
1431 top = min(fuzz, top)
1431 top = min(fuzz, top)
1432 return old[top:len(old) - bot], new[top:len(new) - bot], top
1432 return old[top:len(old) - bot], new[top:len(new) - bot], top
1433 return old, new, 0
1433 return old, new, 0
1434
1434
1435 def fuzzit(self, fuzz, toponly):
1435 def fuzzit(self, fuzz, toponly):
1436 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1436 old, new, top = self._fuzzit(self.a, self.b, fuzz, toponly)
1437 oldstart = self.starta + top
1437 oldstart = self.starta + top
1438 newstart = self.startb + top
1438 newstart = self.startb + top
1439 # zero length hunk ranges already have their start decremented
1439 # zero length hunk ranges already have their start decremented
1440 if self.lena and oldstart > 0:
1440 if self.lena and oldstart > 0:
1441 oldstart -= 1
1441 oldstart -= 1
1442 if self.lenb and newstart > 0:
1442 if self.lenb and newstart > 0:
1443 newstart -= 1
1443 newstart -= 1
1444 return old, oldstart, new, newstart
1444 return old, oldstart, new, newstart
1445
1445
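# Illustrative sketch (not part of the original module): fuzzit()/
# _fuzzit() above drop up to 'fuzz' leading (and, unless toponly is
# set, trailing) context lines so a hunk can still apply when the
# surrounding context has drifted. A hypothetical standalone helper
# taking precomputed counts of leading/trailing context lines:
def _trimfuzz_sketch(old, new, contexttop, contextbot, fuzz):
    # never trim more than the available context, and never trim
    # changed lines -- that is what the startswith(' ') checks above
    # guarantee when counting contexttop/contextbot
    top = min(fuzz, contexttop)
    bot = min(fuzz, contextbot)
    return old[top:len(old) - bot], new[top:len(new) - bot], top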
1446 class binhunk(object):
1446 class binhunk(object):
1447 'A binary patch file.'
1447 'A binary patch file.'
1448 def __init__(self, lr, fname):
1448 def __init__(self, lr, fname):
1449 self.text = None
1449 self.text = None
1450 self.delta = False
1450 self.delta = False
1451 self.hunk = ['GIT binary patch\n']
1451 self.hunk = ['GIT binary patch\n']
1452 self._fname = fname
1452 self._fname = fname
1453 self._read(lr)
1453 self._read(lr)
1454
1454
1455 def complete(self):
1455 def complete(self):
1456 return self.text is not None
1456 return self.text is not None
1457
1457
1458 def new(self, lines):
1458 def new(self, lines):
1459 if self.delta:
1459 if self.delta:
1460 return [applybindelta(self.text, ''.join(lines))]
1460 return [applybindelta(self.text, ''.join(lines))]
1461 return [self.text]
1461 return [self.text]
1462
1462
1463 def _read(self, lr):
1463 def _read(self, lr):
1464 def getline(lr, hunk):
1464 def getline(lr, hunk):
1465 l = lr.readline()
1465 l = lr.readline()
1466 hunk.append(l)
1466 hunk.append(l)
1467 return l.rstrip('\r\n')
1467 return l.rstrip('\r\n')
1468
1468
1469 while True:
1469 while True:
1470 line = getline(lr, self.hunk)
1470 line = getline(lr, self.hunk)
1471 if not line:
1471 if not line:
1472 raise PatchError(_('could not extract "%s" binary data')
1472 raise PatchError(_('could not extract "%s" binary data')
1473 % self._fname)
1473 % self._fname)
1474 if line.startswith('literal '):
1474 if line.startswith('literal '):
1475 size = int(line[8:].rstrip())
1475 size = int(line[8:].rstrip())
1476 break
1476 break
1477 if line.startswith('delta '):
1477 if line.startswith('delta '):
1478 size = int(line[6:].rstrip())
1478 size = int(line[6:].rstrip())
1479 self.delta = True
1479 self.delta = True
1480 break
1480 break
1481 dec = []
1481 dec = []
1482 line = getline(lr, self.hunk)
1482 line = getline(lr, self.hunk)
1483 while len(line) > 1:
1483 while len(line) > 1:
1484 l = line[0:1]
1484 l = line[0:1]
1485 if l <= 'Z' and l >= 'A':
1485 if l <= 'Z' and l >= 'A':
1486 l = ord(l) - ord('A') + 1
1486 l = ord(l) - ord('A') + 1
1487 else:
1487 else:
1488 l = ord(l) - ord('a') + 27
1488 l = ord(l) - ord('a') + 27
1489 try:
1489 try:
1490 dec.append(util.b85decode(line[1:])[:l])
1490 dec.append(util.b85decode(line[1:])[:l])
1491 except ValueError as e:
1491 except ValueError as e:
1492 raise PatchError(_('could not decode "%s" binary patch: %s')
1492 raise PatchError(_('could not decode "%s" binary patch: %s')
1493 % (self._fname, stringutil.forcebytestr(e)))
1493 % (self._fname, stringutil.forcebytestr(e)))
1494 line = getline(lr, self.hunk)
1494 line = getline(lr, self.hunk)
1495 text = zlib.decompress(''.join(dec))
1495 text = zlib.decompress(''.join(dec))
1496 if len(text) != size:
1496 if len(text) != size:
1497 raise PatchError(_('"%s" length is %d bytes, should be %d')
1497 raise PatchError(_('"%s" length is %d bytes, should be %d')
1498 % (self._fname, len(text), size))
1498 % (self._fname, len(text), size))
1499 self.text = text
1499 self.text = text
1500
1500
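# Illustrative sketch (not part of the original module): each data line
# of a 'GIT binary patch' encodes its decoded byte count in the first
# character ('A'-'Z' -> 1-26, 'a'-'z' -> 27-52) and carries base85 data
# in the rest of the line, which is what binhunk._read() above decodes.
def _gitbinarylinelen_sketch(prefix):
    # prefix is the first character of a binary patch data line
    if 'A' <= prefix <= 'Z':
        return ord(prefix) - ord('A') + 1
    return ord(prefix) - ord('a') + 27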
1501 def parsefilename(str):
1501 def parsefilename(str):
1502 # --- filename \t|space stuff
1502 # --- filename \t|space stuff
1503 s = str[4:].rstrip('\r\n')
1503 s = str[4:].rstrip('\r\n')
1504 i = s.find('\t')
1504 i = s.find('\t')
1505 if i < 0:
1505 if i < 0:
1506 i = s.find(' ')
1506 i = s.find(' ')
1507 if i < 0:
1507 if i < 0:
1508 return s
1508 return s
1509 return s[:i]
1509 return s[:i]
1510
1510
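# Illustrative examples (not part of the original module) of
# parsefilename() above, which drops the '--- '/'+++ ' prefix and any
# trailing tab- or space-separated metadata:
#
#   parsefilename('--- a/foo.c\t2019-01-01 00:00:00')  -> 'a/foo.c'
#   parsefilename('+++ b/foo.c')                       -> 'b/foo.c'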
1511 def reversehunks(hunks):
1511 def reversehunks(hunks):
1512 '''reverse the signs in the hunks given as argument
1512 '''reverse the signs in the hunks given as argument
1513
1513
1514 This function operates on hunks coming out of patch.filterpatch, that is
1514 This function operates on hunks coming out of patch.filterpatch, that is
1515 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1515 a list of the form: [header1, hunk1, hunk2, header2...]. Example usage:
1516
1516
1517 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1517 >>> rawpatch = b"""diff --git a/folder1/g b/folder1/g
1518 ... --- a/folder1/g
1518 ... --- a/folder1/g
1519 ... +++ b/folder1/g
1519 ... +++ b/folder1/g
1520 ... @@ -1,7 +1,7 @@
1520 ... @@ -1,7 +1,7 @@
1521 ... +firstline
1521 ... +firstline
1522 ... c
1522 ... c
1523 ... 1
1523 ... 1
1524 ... 2
1524 ... 2
1525 ... + 3
1525 ... + 3
1526 ... -4
1526 ... -4
1527 ... 5
1527 ... 5
1528 ... d
1528 ... d
1529 ... +lastline"""
1529 ... +lastline"""
1530 >>> hunks = parsepatch([rawpatch])
1530 >>> hunks = parsepatch([rawpatch])
1531 >>> hunkscomingfromfilterpatch = []
1531 >>> hunkscomingfromfilterpatch = []
1532 >>> for h in hunks:
1532 >>> for h in hunks:
1533 ... hunkscomingfromfilterpatch.append(h)
1533 ... hunkscomingfromfilterpatch.append(h)
1534 ... hunkscomingfromfilterpatch.extend(h.hunks)
1534 ... hunkscomingfromfilterpatch.extend(h.hunks)
1535
1535
1536 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1536 >>> reversedhunks = reversehunks(hunkscomingfromfilterpatch)
1537 >>> from . import util
1537 >>> from . import util
1538 >>> fp = util.stringio()
1538 >>> fp = util.stringio()
1539 >>> for c in reversedhunks:
1539 >>> for c in reversedhunks:
1540 ... c.write(fp)
1540 ... c.write(fp)
1541 >>> fp.seek(0) or None
1541 >>> fp.seek(0) or None
1542 >>> reversedpatch = fp.read()
1542 >>> reversedpatch = fp.read()
1543 >>> print(pycompat.sysstr(reversedpatch))
1543 >>> print(pycompat.sysstr(reversedpatch))
1544 diff --git a/folder1/g b/folder1/g
1544 diff --git a/folder1/g b/folder1/g
1545 --- a/folder1/g
1545 --- a/folder1/g
1546 +++ b/folder1/g
1546 +++ b/folder1/g
1547 @@ -1,4 +1,3 @@
1547 @@ -1,4 +1,3 @@
1548 -firstline
1548 -firstline
1549 c
1549 c
1550 1
1550 1
1551 2
1551 2
1552 @@ -2,6 +1,6 @@
1552 @@ -2,6 +1,6 @@
1553 c
1553 c
1554 1
1554 1
1555 2
1555 2
1556 - 3
1556 - 3
1557 +4
1557 +4
1558 5
1558 5
1559 d
1559 d
1560 @@ -6,3 +5,2 @@
1560 @@ -6,3 +5,2 @@
1561 5
1561 5
1562 d
1562 d
1563 -lastline
1563 -lastline
1564
1564
1565 '''
1565 '''
1566
1566
1567 newhunks = []
1567 newhunks = []
1568 for c in hunks:
1568 for c in hunks:
1569 if util.safehasattr(c, 'reversehunk'):
1569 if util.safehasattr(c, 'reversehunk'):
1570 c = c.reversehunk()
1570 c = c.reversehunk()
1571 newhunks.append(c)
1571 newhunks.append(c)
1572 return newhunks
1572 return newhunks
1573
1573
1574 def parsepatch(originalchunks, maxcontext=None):
1574 def parsepatch(originalchunks, maxcontext=None):
1575 """patch -> [] of headers -> [] of hunks
1575 """patch -> [] of headers -> [] of hunks
1576
1576
1577 If maxcontext is not None, trim context lines if necessary.
1577 If maxcontext is not None, trim context lines if necessary.
1578
1578
1579 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1579 >>> rawpatch = b'''diff --git a/folder1/g b/folder1/g
1580 ... --- a/folder1/g
1580 ... --- a/folder1/g
1581 ... +++ b/folder1/g
1581 ... +++ b/folder1/g
1582 ... @@ -1,8 +1,10 @@
1582 ... @@ -1,8 +1,10 @@
1583 ... 1
1583 ... 1
1584 ... 2
1584 ... 2
1585 ... -3
1585 ... -3
1586 ... 4
1586 ... 4
1587 ... 5
1587 ... 5
1588 ... 6
1588 ... 6
1589 ... +6.1
1589 ... +6.1
1590 ... +6.2
1590 ... +6.2
1591 ... 7
1591 ... 7
1592 ... 8
1592 ... 8
1593 ... +9'''
1593 ... +9'''
1594 >>> out = util.stringio()
1594 >>> out = util.stringio()
1595 >>> headers = parsepatch([rawpatch], maxcontext=1)
1595 >>> headers = parsepatch([rawpatch], maxcontext=1)
1596 >>> for header in headers:
1596 >>> for header in headers:
1597 ... header.write(out)
1597 ... header.write(out)
1598 ... for hunk in header.hunks:
1598 ... for hunk in header.hunks:
1599 ... hunk.write(out)
1599 ... hunk.write(out)
1600 >>> print(pycompat.sysstr(out.getvalue()))
1600 >>> print(pycompat.sysstr(out.getvalue()))
1601 diff --git a/folder1/g b/folder1/g
1601 diff --git a/folder1/g b/folder1/g
1602 --- a/folder1/g
1602 --- a/folder1/g
1603 +++ b/folder1/g
1603 +++ b/folder1/g
1604 @@ -2,3 +2,2 @@
1604 @@ -2,3 +2,2 @@
1605 2
1605 2
1606 -3
1606 -3
1607 4
1607 4
1608 @@ -6,2 +5,4 @@
1608 @@ -6,2 +5,4 @@
1609 6
1609 6
1610 +6.1
1610 +6.1
1611 +6.2
1611 +6.2
1612 7
1612 7
1613 @@ -8,1 +9,2 @@
1613 @@ -8,1 +9,2 @@
1614 8
1614 8
1615 +9
1615 +9
1616 """
1616 """
1617 class parser(object):
1617 class parser(object):
1618 """patch parsing state machine"""
1618 """patch parsing state machine"""
1619 def __init__(self):
1619 def __init__(self):
1620 self.fromline = 0
1620 self.fromline = 0
1621 self.toline = 0
1621 self.toline = 0
1622 self.proc = ''
1622 self.proc = ''
1623 self.header = None
1623 self.header = None
1624 self.context = []
1624 self.context = []
1625 self.before = []
1625 self.before = []
1626 self.hunk = []
1626 self.hunk = []
1627 self.headers = []
1627 self.headers = []
1628
1628
1629 def addrange(self, limits):
1629 def addrange(self, limits):
1630 self.addcontext([])
1630 self.addcontext([])
1631 fromstart, fromend, tostart, toend, proc = limits
1631 fromstart, fromend, tostart, toend, proc = limits
1632 self.fromline = int(fromstart)
1632 self.fromline = int(fromstart)
1633 self.toline = int(tostart)
1633 self.toline = int(tostart)
1634 self.proc = proc
1634 self.proc = proc
1635
1635
1636 def addcontext(self, context):
1636 def addcontext(self, context):
1637 if self.hunk:
1637 if self.hunk:
1638 h = recordhunk(self.header, self.fromline, self.toline,
1638 h = recordhunk(self.header, self.fromline, self.toline,
1639 self.proc, self.before, self.hunk, context, maxcontext)
1639 self.proc, self.before, self.hunk, context, maxcontext)
1640 self.header.hunks.append(h)
1640 self.header.hunks.append(h)
1641 self.fromline += len(self.before) + h.removed
1641 self.fromline += len(self.before) + h.removed
1642 self.toline += len(self.before) + h.added
1642 self.toline += len(self.before) + h.added
1643 self.before = []
1643 self.before = []
1644 self.hunk = []
1644 self.hunk = []
1645 self.context = context
1645 self.context = context
1646
1646
1647 def addhunk(self, hunk):
1647 def addhunk(self, hunk):
1648 if self.context:
1648 if self.context:
1649 self.before = self.context
1649 self.before = self.context
1650 self.context = []
1650 self.context = []
1651 if self.hunk:
1651 if self.hunk:
1652 self.addcontext([])
1652 self.addcontext([])
1653 self.hunk = hunk
1653 self.hunk = hunk
1654
1654
1655 def newfile(self, hdr):
1655 def newfile(self, hdr):
1656 self.addcontext([])
1656 self.addcontext([])
1657 h = header(hdr)
1657 h = header(hdr)
1658 self.headers.append(h)
1658 self.headers.append(h)
1659 self.header = h
1659 self.header = h
1660
1660
1661 def addother(self, line):
1661 def addother(self, line):
1662 pass # 'other' lines are ignored
1662 pass # 'other' lines are ignored
1663
1663
1664 def finished(self):
1664 def finished(self):
1665 self.addcontext([])
1665 self.addcontext([])
1666 return self.headers
1666 return self.headers
1667
1667
1668 transitions = {
1668 transitions = {
1669 'file': {'context': addcontext,
1669 'file': {'context': addcontext,
1670 'file': newfile,
1670 'file': newfile,
1671 'hunk': addhunk,
1671 'hunk': addhunk,
1672 'range': addrange},
1672 'range': addrange},
1673 'context': {'file': newfile,
1673 'context': {'file': newfile,
1674 'hunk': addhunk,
1674 'hunk': addhunk,
1675 'range': addrange,
1675 'range': addrange,
1676 'other': addother},
1676 'other': addother},
1677 'hunk': {'context': addcontext,
1677 'hunk': {'context': addcontext,
1678 'file': newfile,
1678 'file': newfile,
1679 'range': addrange},
1679 'range': addrange},
1680 'range': {'context': addcontext,
1680 'range': {'context': addcontext,
1681 'hunk': addhunk},
1681 'hunk': addhunk},
1682 'other': {'other': addother},
1682 'other': {'other': addother},
1683 }
1683 }
1684
1684
1685 p = parser()
1685 p = parser()
1686 fp = stringio()
1686 fp = stringio()
1687 fp.write(''.join(originalchunks))
1687 fp.write(''.join(originalchunks))
1688 fp.seek(0)
1688 fp.seek(0)
1689
1689
1690 state = 'context'
1690 state = 'context'
1691 for newstate, data in scanpatch(fp):
1691 for newstate, data in scanpatch(fp):
1692 try:
1692 try:
1693 p.transitions[state][newstate](p, data)
1693 p.transitions[state][newstate](p, data)
1694 except KeyError:
1694 except KeyError:
1695 raise PatchError('unhandled transition: %s -> %s' %
1695 raise PatchError('unhandled transition: %s -> %s' %
1696 (state, newstate))
1696 (state, newstate))
1697 state = newstate
1697 state = newstate
1698 del fp
1698 del fp
1699 return p.finished()
1699 return p.finished()
1700
1700
1701 def pathtransform(path, strip, prefix):
1701 def pathtransform(path, strip, prefix):
1702 '''turn a path from a patch into a path suitable for the repository
1702 '''turn a path from a patch into a path suitable for the repository
1703
1703
1704 prefix, if not empty, is expected to be normalized with a / at the end.
1704 prefix, if not empty, is expected to be normalized with a / at the end.
1705
1705
1706 Returns (stripped components, path in repository).
1706 Returns (stripped components, path in repository).
1707
1707
1708 >>> pathtransform(b'a/b/c', 0, b'')
1708 >>> pathtransform(b'a/b/c', 0, b'')
1709 ('', 'a/b/c')
1709 ('', 'a/b/c')
1710 >>> pathtransform(b' a/b/c ', 0, b'')
1710 >>> pathtransform(b' a/b/c ', 0, b'')
1711 ('', ' a/b/c')
1711 ('', ' a/b/c')
1712 >>> pathtransform(b' a/b/c ', 2, b'')
1712 >>> pathtransform(b' a/b/c ', 2, b'')
1713 ('a/b/', 'c')
1713 ('a/b/', 'c')
1714 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1714 >>> pathtransform(b'a/b/c', 0, b'd/e/')
1715 ('', 'd/e/a/b/c')
1715 ('', 'd/e/a/b/c')
1716 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1716 >>> pathtransform(b' a//b/c ', 2, b'd/e/')
1717 ('a//b/', 'd/e/c')
1717 ('a//b/', 'd/e/c')
1718 >>> pathtransform(b'a/b/c', 3, b'')
1718 >>> pathtransform(b'a/b/c', 3, b'')
1719 Traceback (most recent call last):
1719 Traceback (most recent call last):
1720 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1720 PatchError: unable to strip away 1 of 3 dirs from a/b/c
1721 '''
1721 '''
1722 pathlen = len(path)
1722 pathlen = len(path)
1723 i = 0
1723 i = 0
1724 if strip == 0:
1724 if strip == 0:
1725 return '', prefix + path.rstrip()
1725 return '', prefix + path.rstrip()
1726 count = strip
1726 count = strip
1727 while count > 0:
1727 while count > 0:
1728 i = path.find('/', i)
1728 i = path.find('/', i)
1729 if i == -1:
1729 if i == -1:
1730 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1730 raise PatchError(_("unable to strip away %d of %d dirs from %s") %
1731 (count, strip, path))
1731 (count, strip, path))
1732 i += 1
1732 i += 1
1733 # consume '//' in the path
1733 # consume '//' in the path
1734 while i < pathlen - 1 and path[i:i + 1] == '/':
1734 while i < pathlen - 1 and path[i:i + 1] == '/':
1735 i += 1
1735 i += 1
1736 count -= 1
1736 count -= 1
1737 return path[:i].lstrip(), prefix + path[i:].rstrip()
1737 return path[:i].lstrip(), prefix + path[i:].rstrip()
1738
1738
1739 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1739 def makepatchmeta(backend, afile_orig, bfile_orig, hunk, strip, prefix):
1740 nulla = afile_orig == "/dev/null"
1740 nulla = afile_orig == "/dev/null"
1741 nullb = bfile_orig == "/dev/null"
1741 nullb = bfile_orig == "/dev/null"
1742 create = nulla and hunk.starta == 0 and hunk.lena == 0
1742 create = nulla and hunk.starta == 0 and hunk.lena == 0
1743 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1743 remove = nullb and hunk.startb == 0 and hunk.lenb == 0
1744 abase, afile = pathtransform(afile_orig, strip, prefix)
1744 abase, afile = pathtransform(afile_orig, strip, prefix)
1745 gooda = not nulla and backend.exists(afile)
1745 gooda = not nulla and backend.exists(afile)
1746 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1746 bbase, bfile = pathtransform(bfile_orig, strip, prefix)
1747 if afile == bfile:
1747 if afile == bfile:
1748 goodb = gooda
1748 goodb = gooda
1749 else:
1749 else:
1750 goodb = not nullb and backend.exists(bfile)
1750 goodb = not nullb and backend.exists(bfile)
1751 missing = not goodb and not gooda and not create
1751 missing = not goodb and not gooda and not create
1752
1752
1753 # some diff programs apparently produce patches where the afile is
1753 # some diff programs apparently produce patches where the afile is
1754 # not /dev/null, but afile starts with bfile
1754 # not /dev/null, but afile starts with bfile
1755 abasedir = afile[:afile.rfind('/') + 1]
1755 abasedir = afile[:afile.rfind('/') + 1]
1756 bbasedir = bfile[:bfile.rfind('/') + 1]
1756 bbasedir = bfile[:bfile.rfind('/') + 1]
1757 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1757 if (missing and abasedir == bbasedir and afile.startswith(bfile)
1758 and hunk.starta == 0 and hunk.lena == 0):
1758 and hunk.starta == 0 and hunk.lena == 0):
1759 create = True
1759 create = True
1760 missing = False
1760 missing = False
1761
1761
1762 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1762 # If afile is "a/b/foo" and bfile is "a/b/foo.orig" we assume the
1763 # diff is between a file and its backup. In this case, the original
1763 # diff is between a file and its backup. In this case, the original
1764 # file should be patched (see original mpatch code).
1764 # file should be patched (see original mpatch code).
1765 isbackup = (abase == bbase and bfile.startswith(afile))
1765 isbackup = (abase == bbase and bfile.startswith(afile))
1766 fname = None
1766 fname = None
1767 if not missing:
1767 if not missing:
1768 if gooda and goodb:
1768 if gooda and goodb:
1769 if isbackup:
1769 if isbackup:
1770 fname = afile
1770 fname = afile
1771 else:
1771 else:
1772 fname = bfile
1772 fname = bfile
1773 elif gooda:
1773 elif gooda:
1774 fname = afile
1774 fname = afile
1775
1775
1776 if not fname:
1776 if not fname:
1777 if not nullb:
1777 if not nullb:
1778 if isbackup:
1778 if isbackup:
1779 fname = afile
1779 fname = afile
1780 else:
1780 else:
1781 fname = bfile
1781 fname = bfile
1782 elif not nulla:
1782 elif not nulla:
1783 fname = afile
1783 fname = afile
1784 else:
1784 else:
1785 raise PatchError(_("undefined source and destination files"))
1785 raise PatchError(_("undefined source and destination files"))
1786
1786
1787 gp = patchmeta(fname)
1787 gp = patchmeta(fname)
1788 if create:
1788 if create:
1789 gp.op = 'ADD'
1789 gp.op = 'ADD'
1790 elif remove:
1790 elif remove:
1791 gp.op = 'DELETE'
1791 gp.op = 'DELETE'
1792 return gp
1792 return gp
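# Hypothetical helper (not part of the original module) mirroring the
# create/remove detection in makepatchmeta() above: a /dev/null old
# side with a 0,0 old range means the file is being added, a /dev/null
# new side with a 0,0 new range means it is being deleted; anything
# else keeps gp.op at its default and the target name is picked from
# whichever of afile/bfile actually exists.
def _patchmetaop_sketch(afile, bfile, starta, lena, startb, lenb):
    if afile == "/dev/null" and starta == 0 and lena == 0:
        return 'ADD'
    if bfile == "/dev/null" and startb == 0 and lenb == 0:
        return 'DELETE'
    return None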
1793
1793
1794 def scanpatch(fp):
1794 def scanpatch(fp):
1795 """like patch.iterhunks, but yield different events
1795 """like patch.iterhunks, but yield different events
1796
1796
1797 - ('file', [header_lines + fromfile + tofile])
1797 - ('file', [header_lines + fromfile + tofile])
1798 - ('context', [context_lines])
1798 - ('context', [context_lines])
1799 - ('hunk', [hunk_lines])
1799 - ('hunk', [hunk_lines])
1800 - ('range', (-start,len, +start,len, proc))
1800 - ('range', (-start,len, +start,len, proc))
1801 """
1801 """
1802 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1802 lines_re = re.compile(br'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
1803 lr = linereader(fp)
1803 lr = linereader(fp)
1804
1804
1805 def scanwhile(first, p):
1805 def scanwhile(first, p):
1806 """scan lr while predicate holds"""
1806 """scan lr while predicate holds"""
1807 lines = [first]
1807 lines = [first]
1808 for line in iter(lr.readline, ''):
1808 for line in iter(lr.readline, ''):
1809 if p(line):
1809 if p(line):
1810 lines.append(line)
1810 lines.append(line)
1811 else:
1811 else:
1812 lr.push(line)
1812 lr.push(line)
1813 break
1813 break
1814 return lines
1814 return lines
1815
1815
1816 for line in iter(lr.readline, ''):
1816 for line in iter(lr.readline, ''):
1817 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1817 if line.startswith('diff --git a/') or line.startswith('diff -r '):
1818 def notheader(line):
1818 def notheader(line):
1819 s = line.split(None, 1)
1819 s = line.split(None, 1)
1820 return not s or s[0] not in ('---', 'diff')
1820 return not s or s[0] not in ('---', 'diff')
1821 header = scanwhile(line, notheader)
1821 header = scanwhile(line, notheader)
1822 fromfile = lr.readline()
1822 fromfile = lr.readline()
1823 if fromfile.startswith('---'):
1823 if fromfile.startswith('---'):
1824 tofile = lr.readline()
1824 tofile = lr.readline()
1825 header += [fromfile, tofile]
1825 header += [fromfile, tofile]
1826 else:
1826 else:
1827 lr.push(fromfile)
1827 lr.push(fromfile)
1828 yield 'file', header
1828 yield 'file', header
1829 elif line.startswith(' '):
1829 elif line.startswith(' '):
1830 cs = (' ', '\\')
1830 cs = (' ', '\\')
1831 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1831 yield 'context', scanwhile(line, lambda l: l.startswith(cs))
1832 elif line.startswith(('-', '+')):
1832 elif line.startswith(('-', '+')):
1833 cs = ('-', '+', '\\')
1833 cs = ('-', '+', '\\')
1834 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1834 yield 'hunk', scanwhile(line, lambda l: l.startswith(cs))
1835 else:
1835 else:
1836 m = lines_re.match(line)
1836 m = lines_re.match(line)
1837 if m:
1837 if m:
1838 yield 'range', m.groups()
1838 yield 'range', m.groups()
1839 else:
1839 else:
1840 yield 'other', line
1840 yield 'other', line
1841
1841
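# Minimal sketch (not part of the original module) of consuming the
# event stream produced by scanpatch() above; parsepatch() drives
# exactly this kind of loop through its transition table.
def _scanpatchevents_sketch(fp):
    for event, data in scanpatch(fp):
        if event == 'file':
            pass   # data: header lines, plus ---/+++ when present
        elif event == 'range':
            pass   # data: (-start, len, +start, len, proc)
        elif event == 'hunk':
            pass   # data: consecutive '+'/'-'/'\' lines
        elif event == 'context':
            pass   # data: consecutive ' ' (and '\') context lines
        else:
            pass   # 'other': anything scanpatch does not recognize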
1842 def scangitpatch(lr, firstline):
1842 def scangitpatch(lr, firstline):
1843 """
1843 """
1844 Git patches can emit:
1844 Git patches can emit:
1845 - rename a to b
1845 - rename a to b
1846 - change b
1846 - change b
1847 - copy a to c
1847 - copy a to c
1848 - change c
1848 - change c
1849
1849
1850 We cannot apply this sequence as-is: the renamed 'a' could not be
1851 found, because it would already have been renamed. And we cannot
1852 copy from 'b' instead, because 'b' would already have been changed.
1853 So we scan the git patch for copy and rename commands so we can
1854 perform the copies ahead of time.
1855 """
1855 """
1856 pos = 0
1856 pos = 0
1857 try:
1857 try:
1858 pos = lr.fp.tell()
1858 pos = lr.fp.tell()
1859 fp = lr.fp
1859 fp = lr.fp
1860 except IOError:
1860 except IOError:
1861 fp = stringio(lr.fp.read())
1861 fp = stringio(lr.fp.read())
1862 gitlr = linereader(fp)
1862 gitlr = linereader(fp)
1863 gitlr.push(firstline)
1863 gitlr.push(firstline)
1864 gitpatches = readgitpatch(gitlr)
1864 gitpatches = readgitpatch(gitlr)
1865 fp.seek(pos)
1865 fp.seek(pos)
1866 return gitpatches
1866 return gitpatches
1867
1867
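# Illustrative example (not part of the original module) of the
# ordering problem described in the scangitpatch() docstring above: a
# git patch may say
#
#   rename from a / rename to b   (followed by hunks touching b)
#   copy from a / copy to c       (followed by hunks touching c)
#
# so the copy of 'a' to 'c' must happen before 'a' is renamed away.
# Scanning the whole patch for COPY/RENAME records up front lets them
# be reported (via the 'git' event in iterhunks() below) before any
# hunks are applied.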
1868 def iterhunks(fp):
1868 def iterhunks(fp):
1869 """Read a patch and yield the following events:
1869 """Read a patch and yield the following events:
1870 - ("file", afile, bfile, firsthunk): select a new target file.
1870 - ("file", afile, bfile, firsthunk): select a new target file.
1871 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1871 - ("hunk", hunk): a new hunk is ready to be applied, follows a
1872 "file" event.
1872 "file" event.
1873 - ("git", gitchanges): current diff is in git format, gitchanges
1873 - ("git", gitchanges): current diff is in git format, gitchanges
1874 maps filenames to gitpatch records. Unique event.
1874 maps filenames to gitpatch records. Unique event.
1875 """
1875 """
1876 afile = ""
1876 afile = ""
1877 bfile = ""
1877 bfile = ""
1878 state = None
1878 state = None
1879 hunknum = 0
1879 hunknum = 0
1880 emitfile = newfile = False
1880 emitfile = newfile = False
1881 gitpatches = None
1881 gitpatches = None
1882
1882
1883 # our states
1883 # our states
1884 BFILE = 1
1884 BFILE = 1
1885 context = None
1885 context = None
1886 lr = linereader(fp)
1886 lr = linereader(fp)
1887
1887
1888 for x in iter(lr.readline, ''):
1888 for x in iter(lr.readline, ''):
1889 if state == BFILE and (
1889 if state == BFILE and (
1890 (not context and x.startswith('@'))
1890 (not context and x.startswith('@'))
1891 or (context is not False and x.startswith('***************'))
1891 or (context is not False and x.startswith('***************'))
1892 or x.startswith('GIT binary patch')):
1892 or x.startswith('GIT binary patch')):
1893 gp = None
1893 gp = None
1894 if (gitpatches and
1894 if (gitpatches and
1895 gitpatches[-1].ispatching(afile, bfile)):
1895 gitpatches[-1].ispatching(afile, bfile)):
1896 gp = gitpatches.pop()
1896 gp = gitpatches.pop()
1897 if x.startswith('GIT binary patch'):
1897 if x.startswith('GIT binary patch'):
1898 h = binhunk(lr, gp.path)
1898 h = binhunk(lr, gp.path)
1899 else:
1899 else:
1900 if context is None and x.startswith('***************'):
1900 if context is None and x.startswith('***************'):
1901 context = True
1901 context = True
1902 h = hunk(x, hunknum + 1, lr, context)
1902 h = hunk(x, hunknum + 1, lr, context)
1903 hunknum += 1
1903 hunknum += 1
1904 if emitfile:
1904 if emitfile:
1905 emitfile = False
1905 emitfile = False
1906 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1906 yield 'file', (afile, bfile, h, gp and gp.copy() or None)
1907 yield 'hunk', h
1907 yield 'hunk', h
1908 elif x.startswith('diff --git a/'):
1908 elif x.startswith('diff --git a/'):
1909 m = gitre.match(x.rstrip(' \r\n'))
1909 m = gitre.match(x.rstrip(' \r\n'))
1910 if not m:
1910 if not m:
1911 continue
1911 continue
1912 if gitpatches is None:
1912 if gitpatches is None:
1913 # scan whole input for git metadata
1913 # scan whole input for git metadata
1914 gitpatches = scangitpatch(lr, x)
1914 gitpatches = scangitpatch(lr, x)
1915 yield 'git', [g.copy() for g in gitpatches
1915 yield 'git', [g.copy() for g in gitpatches
1916 if g.op in ('COPY', 'RENAME')]
1916 if g.op in ('COPY', 'RENAME')]
1917 gitpatches.reverse()
1917 gitpatches.reverse()
1918 afile = 'a/' + m.group(1)
1918 afile = 'a/' + m.group(1)
1919 bfile = 'b/' + m.group(2)
1919 bfile = 'b/' + m.group(2)
1920 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1920 while gitpatches and not gitpatches[-1].ispatching(afile, bfile):
1921 gp = gitpatches.pop()
1921 gp = gitpatches.pop()
1922 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1922 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1923 if not gitpatches:
1923 if not gitpatches:
1924 raise PatchError(_('failed to synchronize metadata for "%s"')
1924 raise PatchError(_('failed to synchronize metadata for "%s"')
1925 % afile[2:])
1925 % afile[2:])
1926 newfile = True
1926 newfile = True
1927 elif x.startswith('---'):
1927 elif x.startswith('---'):
1928 # check for a unified diff
1928 # check for a unified diff
1929 l2 = lr.readline()
1929 l2 = lr.readline()
1930 if not l2.startswith('+++'):
1930 if not l2.startswith('+++'):
1931 lr.push(l2)
1931 lr.push(l2)
1932 continue
1932 continue
1933 newfile = True
1933 newfile = True
1934 context = False
1934 context = False
1935 afile = parsefilename(x)
1935 afile = parsefilename(x)
1936 bfile = parsefilename(l2)
1936 bfile = parsefilename(l2)
1937 elif x.startswith('***'):
1937 elif x.startswith('***'):
1938 # check for a context diff
1938 # check for a context diff
1939 l2 = lr.readline()
1939 l2 = lr.readline()
1940 if not l2.startswith('---'):
1940 if not l2.startswith('---'):
1941 lr.push(l2)
1941 lr.push(l2)
1942 continue
1942 continue
1943 l3 = lr.readline()
1943 l3 = lr.readline()
1944 lr.push(l3)
1944 lr.push(l3)
1945 if not l3.startswith("***************"):
1945 if not l3.startswith("***************"):
1946 lr.push(l2)
1946 lr.push(l2)
1947 continue
1947 continue
1948 newfile = True
1948 newfile = True
1949 context = True
1949 context = True
1950 afile = parsefilename(x)
1950 afile = parsefilename(x)
1951 bfile = parsefilename(l2)
1951 bfile = parsefilename(l2)
1952
1952
1953 if newfile:
1953 if newfile:
1954 newfile = False
1954 newfile = False
1955 emitfile = True
1955 emitfile = True
1956 state = BFILE
1956 state = BFILE
1957 hunknum = 0
1957 hunknum = 0
1958
1958
1959 while gitpatches:
1959 while gitpatches:
1960 gp = gitpatches.pop()
1960 gp = gitpatches.pop()
1961 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1961 yield 'file', ('a/' + gp.path, 'b/' + gp.path, None, gp.copy())
1962
1962
1963 def applybindelta(binchunk, data):
1963 def applybindelta(binchunk, data):
1964 """Apply a binary delta hunk
1964 """Apply a binary delta hunk
1965 The algorithm used is the algorithm from git's patch-delta.c
1965 The algorithm used is the algorithm from git's patch-delta.c
1966 """
1966 """
1967 def deltahead(binchunk):
1967 def deltahead(binchunk):
1968 i = 0
1968 i = 0
1969 for c in pycompat.bytestr(binchunk):
1969 for c in pycompat.bytestr(binchunk):
1970 i += 1
1970 i += 1
1971 if not (ord(c) & 0x80):
1971 if not (ord(c) & 0x80):
1972 return i
1972 return i
1973 return i
1973 return i
1974 out = ""
1974 out = ""
1975 s = deltahead(binchunk)
1975 s = deltahead(binchunk)
1976 binchunk = binchunk[s:]
1976 binchunk = binchunk[s:]
1977 s = deltahead(binchunk)
1977 s = deltahead(binchunk)
1978 binchunk = binchunk[s:]
1978 binchunk = binchunk[s:]
1979 i = 0
1979 i = 0
1980 while i < len(binchunk):
1980 while i < len(binchunk):
1981 cmd = ord(binchunk[i:i + 1])
1981 cmd = ord(binchunk[i:i + 1])
1982 i += 1
1982 i += 1
1983 if (cmd & 0x80):
1983 if (cmd & 0x80):
1984 offset = 0
1984 offset = 0
1985 size = 0
1985 size = 0
1986 if (cmd & 0x01):
1986 if (cmd & 0x01):
1987 offset = ord(binchunk[i:i + 1])
1987 offset = ord(binchunk[i:i + 1])
1988 i += 1
1988 i += 1
1989 if (cmd & 0x02):
1989 if (cmd & 0x02):
1990 offset |= ord(binchunk[i:i + 1]) << 8
1990 offset |= ord(binchunk[i:i + 1]) << 8
1991 i += 1
1991 i += 1
1992 if (cmd & 0x04):
1992 if (cmd & 0x04):
1993 offset |= ord(binchunk[i:i + 1]) << 16
1993 offset |= ord(binchunk[i:i + 1]) << 16
1994 i += 1
1994 i += 1
1995 if (cmd & 0x08):
1995 if (cmd & 0x08):
1996 offset |= ord(binchunk[i:i + 1]) << 24
1996 offset |= ord(binchunk[i:i + 1]) << 24
1997 i += 1
1997 i += 1
1998 if (cmd & 0x10):
1998 if (cmd & 0x10):
1999 size = ord(binchunk[i:i + 1])
1999 size = ord(binchunk[i:i + 1])
2000 i += 1
2000 i += 1
2001 if (cmd & 0x20):
2001 if (cmd & 0x20):
2002 size |= ord(binchunk[i:i + 1]) << 8
2002 size |= ord(binchunk[i:i + 1]) << 8
2003 i += 1
2003 i += 1
2004 if (cmd & 0x40):
2004 if (cmd & 0x40):
2005 size |= ord(binchunk[i:i + 1]) << 16
2005 size |= ord(binchunk[i:i + 1]) << 16
2006 i += 1
2006 i += 1
2007 if size == 0:
2007 if size == 0:
2008 size = 0x10000
2008 size = 0x10000
2009 offset_end = offset + size
2009 offset_end = offset + size
2010 out += data[offset:offset_end]
2010 out += data[offset:offset_end]
2011 elif cmd != 0:
2011 elif cmd != 0:
2012 offset_end = i + cmd
2012 offset_end = i + cmd
2013 out += binchunk[i:offset_end]
2013 out += binchunk[i:offset_end]
2014 i += cmd
2014 i += cmd
2015 else:
2015 else:
2016 raise PatchError(_('unexpected delta opcode 0'))
2016 raise PatchError(_('unexpected delta opcode 0'))
2017 return out
2017 return out
2018
2018
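# (Illustrative sketch, not part of this changeset.) The delta consumed by
# applybindelta() above is git's pack-delta encoding: two base-128 varints
# giving the source and target sizes (skipped by deltahead()), then a stream
# of copy and insert opcodes. The helper name below is invented; it restates
# the same loop in standalone form only to make the byte layout concrete.

def _decode_git_delta_sketch(delta, source):
    def skipvarint(buf, i):
        while buf[i] & 0x80:          # high bit set: more varint bytes follow
            i += 1
        return i + 1
    delta = bytearray(delta)          # int indexing on both py2 and py3
    i = skipvarint(delta, 0)          # source length (value unused here)
    i = skipvarint(delta, i)          # target length (value unused here)
    out = b''
    while i < len(delta):
        cmd = delta[i]
        i += 1
        if cmd & 0x80:                # copy-from-source opcode
            offset = size = 0
            for bit, shift in ((0x01, 0), (0x02, 8), (0x04, 16), (0x08, 24)):
                if cmd & bit:
                    offset |= delta[i] << shift
                    i += 1
            for bit, shift in ((0x10, 0), (0x20, 8), (0x40, 16)):
                if cmd & bit:
                    size |= delta[i] << shift
                    i += 1
            size = size or 0x10000    # size 0 means 64KiB, as above
            out += bytes(source[offset:offset + size])
        elif cmd:                     # insert: next `cmd` bytes are literal data
            out += bytes(delta[i:i + cmd])
            i += cmd
        else:
            raise ValueError('unexpected delta opcode 0')
    return out

# b'\x0c\x06' encodes sizes 12 and 6, b'\x91\x07\x05' copies source[7:12],
# and b'\x01!' inserts the literal byte b'!':
#   _decode_git_delta_sketch(b'\x0c\x06\x91\x07\x05\x01!', b'hello, world')
#   # -> b'world!'; applybindelta() above produces the same result.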
2019 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2019 def applydiff(ui, fp, backend, store, strip=1, prefix='', eolmode='strict'):
2020 """Reads a patch from fp and tries to apply it.
2020 """Reads a patch from fp and tries to apply it.
2021
2021
2022 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2022 Returns 0 for a clean patch, -1 if any rejects were found and 1 if
2023 there was any fuzz.
2023 there was any fuzz.
2024
2024
2025 If 'eolmode' is 'strict', the patch content and patched file are
2025 If 'eolmode' is 'strict', the patch content and patched file are
2026 read in binary mode. Otherwise, line endings are ignored when
2026 read in binary mode. Otherwise, line endings are ignored when
2027 patching then normalized according to 'eolmode'.
2027 patching then normalized according to 'eolmode'.
2028 """
2028 """
2029 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2029 return _applydiff(ui, fp, patchfile, backend, store, strip=strip,
2030 prefix=prefix, eolmode=eolmode)
2030 prefix=prefix, eolmode=eolmode)
2031
2031
2032 def _canonprefix(repo, prefix):
2032 def _canonprefix(repo, prefix):
2033 if prefix:
2033 if prefix:
2034 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2034 prefix = pathutil.canonpath(repo.root, repo.getcwd(), prefix)
2035 if prefix != '':
2035 if prefix != '':
2036 prefix += '/'
2036 prefix += '/'
2037 return prefix
2037 return prefix
2038
2038
2039 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2039 def _applydiff(ui, fp, patcher, backend, store, strip=1, prefix='',
2040 eolmode='strict'):
2040 eolmode='strict'):
2041 prefix = _canonprefix(backend.repo, prefix)
2041 prefix = _canonprefix(backend.repo, prefix)
2042 def pstrip(p):
2042 def pstrip(p):
2043 return pathtransform(p, strip - 1, prefix)[1]
2043 return pathtransform(p, strip - 1, prefix)[1]
2044
2044
2045 rejects = 0
2045 rejects = 0
2046 err = 0
2046 err = 0
2047 current_file = None
2047 current_file = None
2048
2048
2049 for state, values in iterhunks(fp):
2049 for state, values in iterhunks(fp):
2050 if state == 'hunk':
2050 if state == 'hunk':
2051 if not current_file:
2051 if not current_file:
2052 continue
2052 continue
2053 ret = current_file.apply(values)
2053 ret = current_file.apply(values)
2054 if ret > 0:
2054 if ret > 0:
2055 err = 1
2055 err = 1
2056 elif state == 'file':
2056 elif state == 'file':
2057 if current_file:
2057 if current_file:
2058 rejects += current_file.close()
2058 rejects += current_file.close()
2059 current_file = None
2059 current_file = None
2060 afile, bfile, first_hunk, gp = values
2060 afile, bfile, first_hunk, gp = values
2061 if gp:
2061 if gp:
2062 gp.path = pstrip(gp.path)
2062 gp.path = pstrip(gp.path)
2063 if gp.oldpath:
2063 if gp.oldpath:
2064 gp.oldpath = pstrip(gp.oldpath)
2064 gp.oldpath = pstrip(gp.oldpath)
2065 else:
2065 else:
2066 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2066 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2067 prefix)
2067 prefix)
2068 if gp.op == 'RENAME':
2068 if gp.op == 'RENAME':
2069 backend.unlink(gp.oldpath)
2069 backend.unlink(gp.oldpath)
2070 if not first_hunk:
2070 if not first_hunk:
2071 if gp.op == 'DELETE':
2071 if gp.op == 'DELETE':
2072 backend.unlink(gp.path)
2072 backend.unlink(gp.path)
2073 continue
2073 continue
2074 data, mode = None, None
2074 data, mode = None, None
2075 if gp.op in ('RENAME', 'COPY'):
2075 if gp.op in ('RENAME', 'COPY'):
2076 data, mode = store.getfile(gp.oldpath)[:2]
2076 data, mode = store.getfile(gp.oldpath)[:2]
2077 if data is None:
2077 if data is None:
2078 # This means that the old path does not exist
2078 # This means that the old path does not exist
2079 raise PatchError(_("source file '%s' does not exist")
2079 raise PatchError(_("source file '%s' does not exist")
2080 % gp.oldpath)
2080 % gp.oldpath)
2081 if gp.mode:
2081 if gp.mode:
2082 mode = gp.mode
2082 mode = gp.mode
2083 if gp.op == 'ADD':
2083 if gp.op == 'ADD':
2084 # Added files without content have no hunk and
2084 # Added files without content have no hunk and
2085 # must be created
2085 # must be created
2086 data = ''
2086 data = ''
2087 if data or mode:
2087 if data or mode:
2088 if (gp.op in ('ADD', 'RENAME', 'COPY')
2088 if (gp.op in ('ADD', 'RENAME', 'COPY')
2089 and backend.exists(gp.path)):
2089 and backend.exists(gp.path)):
2090 raise PatchError(_("cannot create %s: destination "
2090 raise PatchError(_("cannot create %s: destination "
2091 "already exists") % gp.path)
2091 "already exists") % gp.path)
2092 backend.setfile(gp.path, data, mode, gp.oldpath)
2092 backend.setfile(gp.path, data, mode, gp.oldpath)
2093 continue
2093 continue
2094 try:
2094 try:
2095 current_file = patcher(ui, gp, backend, store,
2095 current_file = patcher(ui, gp, backend, store,
2096 eolmode=eolmode)
2096 eolmode=eolmode)
2097 except PatchError as inst:
2097 except PatchError as inst:
2098 ui.warn(str(inst) + '\n')
2098 ui.warn(str(inst) + '\n')
2099 current_file = None
2099 current_file = None
2100 rejects += 1
2100 rejects += 1
2101 continue
2101 continue
2102 elif state == 'git':
2102 elif state == 'git':
2103 for gp in values:
2103 for gp in values:
2104 path = pstrip(gp.oldpath)
2104 path = pstrip(gp.oldpath)
2105 data, mode = backend.getfile(path)
2105 data, mode = backend.getfile(path)
2106 if data is None:
2106 if data is None:
2107 # The error ignored here will trigger a getfile()
2107 # The error ignored here will trigger a getfile()
2108 # error in a place more appropriate for error
2108 # error in a place more appropriate for error
2109 # handling, and will not interrupt the patching
2109 # handling, and will not interrupt the patching
2110 # process.
2110 # process.
2111 pass
2111 pass
2112 else:
2112 else:
2113 store.setfile(path, data, mode)
2113 store.setfile(path, data, mode)
2114 else:
2114 else:
2115 raise error.Abort(_('unsupported parser state: %s') % state)
2115 raise error.Abort(_('unsupported parser state: %s') % state)
2116
2116
2117 if current_file:
2117 if current_file:
2118 rejects += current_file.close()
2118 rejects += current_file.close()
2119
2119
2120 if rejects:
2120 if rejects:
2121 return -1
2121 return -1
2122 return err
2122 return err
2123
2123
2124 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2124 def _externalpatch(ui, repo, patcher, patchname, strip, files,
2125 similarity):
2125 similarity):
2126 """use <patcher> to apply <patchname> to the working directory.
2126 """use <patcher> to apply <patchname> to the working directory.
2127 returns whether patch was applied with fuzz factor."""
2127 returns whether patch was applied with fuzz factor."""
2128
2128
2129 fuzz = False
2129 fuzz = False
2130 args = []
2130 args = []
2131 cwd = repo.root
2131 cwd = repo.root
2132 if cwd:
2132 if cwd:
2133 args.append('-d %s' % procutil.shellquote(cwd))
2133 args.append('-d %s' % procutil.shellquote(cwd))
2134 cmd = ('%s %s -p%d < %s'
2134 cmd = ('%s %s -p%d < %s'
2135 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2135 % (patcher, ' '.join(args), strip, procutil.shellquote(patchname)))
2136 ui.debug('Using external patch tool: %s\n' % cmd)
2136 ui.debug('Using external patch tool: %s\n' % cmd)
2137 fp = procutil.popen(cmd, 'rb')
2137 fp = procutil.popen(cmd, 'rb')
2138 try:
2138 try:
2139 for line in util.iterfile(fp):
2139 for line in util.iterfile(fp):
2140 line = line.rstrip()
2140 line = line.rstrip()
2141 ui.note(line + '\n')
2141 ui.note(line + '\n')
2142 if line.startswith('patching file '):
2142 if line.startswith('patching file '):
2143 pf = util.parsepatchoutput(line)
2143 pf = util.parsepatchoutput(line)
2144 printed_file = False
2144 printed_file = False
2145 files.add(pf)
2145 files.add(pf)
2146 elif line.find('with fuzz') >= 0:
2146 elif line.find('with fuzz') >= 0:
2147 fuzz = True
2147 fuzz = True
2148 if not printed_file:
2148 if not printed_file:
2149 ui.warn(pf + '\n')
2149 ui.warn(pf + '\n')
2150 printed_file = True
2150 printed_file = True
2151 ui.warn(line + '\n')
2151 ui.warn(line + '\n')
2152 elif line.find('saving rejects to file') >= 0:
2152 elif line.find('saving rejects to file') >= 0:
2153 ui.warn(line + '\n')
2153 ui.warn(line + '\n')
2154 elif line.find('FAILED') >= 0:
2154 elif line.find('FAILED') >= 0:
2155 if not printed_file:
2155 if not printed_file:
2156 ui.warn(pf + '\n')
2156 ui.warn(pf + '\n')
2157 printed_file = True
2157 printed_file = True
2158 ui.warn(line + '\n')
2158 ui.warn(line + '\n')
2159 finally:
2159 finally:
2160 if files:
2160 if files:
2161 scmutil.marktouched(repo, files, similarity)
2161 scmutil.marktouched(repo, files, similarity)
2162 code = fp.close()
2162 code = fp.close()
2163 if code:
2163 if code:
2164 raise PatchError(_("patch command failed: %s") %
2164 raise PatchError(_("patch command failed: %s") %
2165 procutil.explainexit(code))
2165 procutil.explainexit(code))
2166 return fuzz
2166 return fuzz
2167
2167
2168 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2168 def patchbackend(ui, backend, patchobj, strip, prefix, files=None,
2169 eolmode='strict'):
2169 eolmode='strict'):
2170 if files is None:
2170 if files is None:
2171 files = set()
2171 files = set()
2172 if eolmode is None:
2172 if eolmode is None:
2173 eolmode = ui.config('patch', 'eol')
2173 eolmode = ui.config('patch', 'eol')
2174 if eolmode.lower() not in eolmodes:
2174 if eolmode.lower() not in eolmodes:
2175 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2175 raise error.Abort(_('unsupported line endings type: %s') % eolmode)
2176 eolmode = eolmode.lower()
2176 eolmode = eolmode.lower()
2177
2177
2178 store = filestore()
2178 store = filestore()
2179 try:
2179 try:
2180 fp = open(patchobj, 'rb')
2180 fp = open(patchobj, 'rb')
2181 except TypeError:
2181 except TypeError:
2182 fp = patchobj
2182 fp = patchobj
2183 try:
2183 try:
2184 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2184 ret = applydiff(ui, fp, backend, store, strip=strip, prefix=prefix,
2185 eolmode=eolmode)
2185 eolmode=eolmode)
2186 finally:
2186 finally:
2187 if fp != patchobj:
2187 if fp != patchobj:
2188 fp.close()
2188 fp.close()
2189 files.update(backend.close())
2189 files.update(backend.close())
2190 store.close()
2190 store.close()
2191 if ret < 0:
2191 if ret < 0:
2192 raise PatchError(_('patch failed to apply'))
2192 raise PatchError(_('patch failed to apply'))
2193 return ret > 0
2193 return ret > 0
2194
2194
2195 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2195 def internalpatch(ui, repo, patchobj, strip, prefix='', files=None,
2196 eolmode='strict', similarity=0):
2196 eolmode='strict', similarity=0):
2197 """use builtin patch to apply <patchobj> to the working directory.
2197 """use builtin patch to apply <patchobj> to the working directory.
2198 returns whether patch was applied with fuzz factor."""
2198 returns whether patch was applied with fuzz factor."""
2199 backend = workingbackend(ui, repo, similarity)
2199 backend = workingbackend(ui, repo, similarity)
2200 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2200 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2201
2201
2202 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2202 def patchrepo(ui, repo, ctx, store, patchobj, strip, prefix, files=None,
2203 eolmode='strict'):
2203 eolmode='strict'):
2204 backend = repobackend(ui, repo, ctx, store)
2204 backend = repobackend(ui, repo, ctx, store)
2205 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2205 return patchbackend(ui, backend, patchobj, strip, prefix, files, eolmode)
2206
2206
2207 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2207 def patch(ui, repo, patchname, strip=1, prefix='', files=None, eolmode='strict',
2208 similarity=0):
2208 similarity=0):
2209 """Apply <patchname> to the working directory.
2209 """Apply <patchname> to the working directory.
2210
2210
2211 'eolmode' specifies how end of lines should be handled. It can be:
2211 'eolmode' specifies how end of lines should be handled. It can be:
2212 - 'strict': inputs are read in binary mode, EOLs are preserved
2212 - 'strict': inputs are read in binary mode, EOLs are preserved
2213 - 'crlf': EOLs are ignored when patching and reset to CRLF
2213 - 'crlf': EOLs are ignored when patching and reset to CRLF
2214 - 'lf': EOLs are ignored when patching and reset to LF
2214 - 'lf': EOLs are ignored when patching and reset to LF
2215 - None: get it from user settings, default to 'strict'
2215 - None: get it from user settings, default to 'strict'
2216 'eolmode' is ignored when using an external patcher program.
2216 'eolmode' is ignored when using an external patcher program.
2217
2217
2218 Returns whether patch was applied with fuzz factor.
2218 Returns whether patch was applied with fuzz factor.
2219 """
2219 """
2220 patcher = ui.config('ui', 'patch')
2220 patcher = ui.config('ui', 'patch')
2221 if files is None:
2221 if files is None:
2222 files = set()
2222 files = set()
2223 if patcher:
2223 if patcher:
2224 return _externalpatch(ui, repo, patcher, patchname, strip,
2224 return _externalpatch(ui, repo, patcher, patchname, strip,
2225 files, similarity)
2225 files, similarity)
2226 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2226 return internalpatch(ui, repo, patchname, strip, prefix, files, eolmode,
2227 similarity)
2227 similarity)
2228
2228
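# A minimal usage sketch for patch() above (illustrative; 'fix.patch' and the
# surrounding ui/repo objects are assumed to exist). With eolmode=None the
# mode is read from the [patch] eol config in patchbackend(), and a
# configured ui.patch command is routed through _externalpatch() instead of
# the builtin patcher.
#
#   files = set()
#   fuzz = patch(ui, repo, 'fix.patch', strip=1, files=files, eolmode=None)
#   if fuzz:
#       ui.warn('patch applied with fuzz\n')
#   # 'files' now contains the paths touched by the patch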
2229 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2229 def changedfiles(ui, repo, patchpath, strip=1, prefix=''):
2230 backend = fsbackend(ui, repo.root)
2230 backend = fsbackend(ui, repo.root)
2231 prefix = _canonprefix(repo, prefix)
2231 prefix = _canonprefix(repo, prefix)
2232 with open(patchpath, 'rb') as fp:
2232 with open(patchpath, 'rb') as fp:
2233 changed = set()
2233 changed = set()
2234 for state, values in iterhunks(fp):
2234 for state, values in iterhunks(fp):
2235 if state == 'file':
2235 if state == 'file':
2236 afile, bfile, first_hunk, gp = values
2236 afile, bfile, first_hunk, gp = values
2237 if gp:
2237 if gp:
2238 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2238 gp.path = pathtransform(gp.path, strip - 1, prefix)[1]
2239 if gp.oldpath:
2239 if gp.oldpath:
2240 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2240 gp.oldpath = pathtransform(gp.oldpath, strip - 1,
2241 prefix)[1]
2241 prefix)[1]
2242 else:
2242 else:
2243 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2243 gp = makepatchmeta(backend, afile, bfile, first_hunk, strip,
2244 prefix)
2244 prefix)
2245 changed.add(gp.path)
2245 changed.add(gp.path)
2246 if gp.op == 'RENAME':
2246 if gp.op == 'RENAME':
2247 changed.add(gp.oldpath)
2247 changed.add(gp.oldpath)
2248 elif state not in ('hunk', 'git'):
2248 elif state not in ('hunk', 'git'):
2249 raise error.Abort(_('unsupported parser state: %s') % state)
2249 raise error.Abort(_('unsupported parser state: %s') % state)
2250 return changed
2250 return changed
2251
2251
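# changedfiles() above only parses the patch to report which paths it would
# touch; it does not modify the working directory. Illustrative sketch (the
# patch path is assumed):
#
#   touched = changedfiles(ui, repo, 'fix.patch')
#   # a set of repo-relative paths, including the old name of a RENAME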
2252 class GitDiffRequired(Exception):
2252 class GitDiffRequired(Exception):
2253 pass
2253 pass
2254
2254
2255 diffopts = diffutil.diffallopts
2255 diffopts = diffutil.diffallopts
2256 diffallopts = diffutil.diffallopts
2256 diffallopts = diffutil.diffallopts
2257 difffeatureopts = diffutil.difffeatureopts
2257 difffeatureopts = diffutil.difffeatureopts
2258
2258
2259 def diff(repo, node1=None, node2=None, match=None, changes=None,
2259 def diff(repo, node1=None, node2=None, match=None, changes=None,
2260 opts=None, losedatafn=None, pathfn=None, copy=None,
2260 opts=None, losedatafn=None, pathfn=None, copy=None,
2261 copysourcematch=None, hunksfilterfn=None):
2261 copysourcematch=None, hunksfilterfn=None):
2262 '''yields diff of changes to files between two nodes, or node and
2262 '''yields diff of changes to files between two nodes, or node and
2263 working directory.
2263 working directory.
2264
2264
2265 if node1 is None, use first dirstate parent instead.
2265 if node1 is None, use first dirstate parent instead.
2266 if node2 is None, compare node1 with working directory.
2266 if node2 is None, compare node1 with working directory.
2267
2267
2268 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2268 losedatafn(**kwarg) is a callable run when opts.upgrade=True and
2269 every time some change cannot be represented with the current
2269 every time some change cannot be represented with the current
2270 patch format. Return False to upgrade to git patch format, True to
2270 patch format. Return False to upgrade to git patch format, True to
2271 accept the loss or raise an exception to abort the diff. It is
2271 accept the loss or raise an exception to abort the diff. It is
2272 called with the name of current file being diffed as 'fn'. If set
2272 called with the name of current file being diffed as 'fn'. If set
2273 to None, patches will always be upgraded to git format when
2273 to None, patches will always be upgraded to git format when
2274 necessary.
2274 necessary.
2275
2275
2276 pathfn, if not None, is applied to every path in the diff output;
2276 pathfn, if not None, is applied to every path in the diff output;
2277 it is used e.g. to prepend a subrepository prefix for display.
2277 it is used e.g. to prepend a subrepository prefix for display.
2278
2278
2279 match, if not None, restricts the status computation (and therefore
2279 match, if not None, restricts the status computation (and therefore
2280 the diff) to matching files.
2280 the diff) to matching files.
2281
2281
2282 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2282 copy, if not empty, should contain mappings {dst@y: src@x} of copy
2283 information.
2283 information.
2284
2284
2285 if copysourcematch is not None, then copy sources will be filtered by this
2285 if copysourcematch is not None, then copy sources will be filtered by this
2286 matcher
2286 matcher
2287
2287
2288 hunksfilterfn, if not None, should be a function taking a filectx and
2288 hunksfilterfn, if not None, should be a function taking a filectx and
2289 hunks generator that may yield filtered hunks.
2289 hunks generator that may yield filtered hunks.
2290 '''
2290 '''
2291 if not node1 and not node2:
2291 if not node1 and not node2:
2292 node1 = repo.dirstate.p1()
2292 node1 = repo.dirstate.p1()
2293
2293
2294 ctx1 = repo[node1]
2294 ctx1 = repo[node1]
2295 ctx2 = repo[node2]
2295 ctx2 = repo[node2]
2296
2296
2297 for fctx1, fctx2, hdr, hunks in diffhunks(
2297 for fctx1, fctx2, hdr, hunks in diffhunks(
2298 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
2298 repo, ctx1=ctx1, ctx2=ctx2, match=match, changes=changes, opts=opts,
2299 losedatafn=losedatafn, pathfn=pathfn, copy=copy,
2299 losedatafn=losedatafn, pathfn=pathfn, copy=copy,
2300 copysourcematch=copysourcematch):
2300 copysourcematch=copysourcematch):
2301 if hunksfilterfn is not None:
2301 if hunksfilterfn is not None:
2302 # If the file has been removed, fctx2 is None; but this should
2302 # If the file has been removed, fctx2 is None; but this should
2303 # not occur here since we catch removed files early in
2303 # not occur here since we catch removed files early in
2304 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2304 # logcmdutil.getlinerangerevs() for 'hg log -L'.
2305 assert fctx2 is not None, (
2305 assert fctx2 is not None, (
2306 'fctx2 unexpectedly None in diff hunks filtering')
2306 'fctx2 unexpectedly None in diff hunks filtering')
2307 hunks = hunksfilterfn(fctx2, hunks)
2307 hunks = hunksfilterfn(fctx2, hunks)
2308 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2308 text = ''.join(sum((list(hlines) for hrange, hlines in hunks), []))
2309 if hdr and (text or len(hdr) > 1):
2309 if hdr and (text or len(hdr) > 1):
2310 yield '\n'.join(hdr) + '\n'
2310 yield '\n'.join(hdr) + '\n'
2311 if text:
2311 if text:
2312 yield text
2312 yield text
2313
2313
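# diff() above is a generator of byte blocks (a joined header, then the hunk
# text), so callers typically join or stream the chunks. Illustrative sketch;
# the node values are assumed, and diffallopts is the alias defined further
# down in this module (assuming the plain options-dict form it accepts):
#
#   text = b''.join(diff(repo))        # first dirstate parent vs. working dir
#   text = b''.join(diff(repo, node1, node2,
#                        opts=diffallopts(repo.ui, {'git': True})))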
2314 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
2314 def diffhunks(repo, ctx1, ctx2, match=None, changes=None, opts=None,
2315 losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
2315 losedatafn=None, pathfn=None, copy=None, copysourcematch=None):
2316 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2316 """Yield diff of changes to files in the form of (`header`, `hunks`) tuples
2317 where `header` is a list of diff headers and `hunks` is an iterable of
2317 where `header` is a list of diff headers and `hunks` is an iterable of
2318 (`hunkrange`, `hunklines`) tuples.
2318 (`hunkrange`, `hunklines`) tuples.
2319
2319
2320 See diff() for the meaning of parameters.
2320 See diff() for the meaning of parameters.
2321 """
2321 """
2322
2322
2323 if opts is None:
2323 if opts is None:
2324 opts = mdiff.defaultopts
2324 opts = mdiff.defaultopts
2325
2325
2326 def lrugetfilectx():
2326 def lrugetfilectx():
2327 cache = {}
2327 cache = {}
2328 order = collections.deque()
2328 order = collections.deque()
2329 def getfilectx(f, ctx):
2329 def getfilectx(f, ctx):
2330 fctx = ctx.filectx(f, filelog=cache.get(f))
2330 fctx = ctx.filectx(f, filelog=cache.get(f))
2331 if f not in cache:
2331 if f not in cache:
2332 if len(cache) > 20:
2332 if len(cache) > 20:
2333 del cache[order.popleft()]
2333 del cache[order.popleft()]
2334 cache[f] = fctx.filelog()
2334 cache[f] = fctx.filelog()
2335 else:
2335 else:
2336 order.remove(f)
2336 order.remove(f)
2337 order.append(f)
2337 order.append(f)
2338 return fctx
2338 return fctx
2339 return getfilectx
2339 return getfilectx
2340 getfilectx = lrugetfilectx()
2340 getfilectx = lrugetfilectx()
2341
2341
2342 if not changes:
2342 if not changes:
2343 changes = ctx1.status(ctx2, match=match)
2343 changes = ctx1.status(ctx2, match=match)
2344 modified, added, removed = changes[:3]
2344 modified, added, removed = changes[:3]
2345
2345
2346 if not modified and not added and not removed:
2346 if not modified and not added and not removed:
2347 return []
2347 return []
2348
2348
2349 if repo.ui.debugflag:
2349 if repo.ui.debugflag:
2350 hexfunc = hex
2350 hexfunc = hex
2351 else:
2351 else:
2352 hexfunc = short
2352 hexfunc = short
2353 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2353 revs = [hexfunc(node) for node in [ctx1.node(), ctx2.node()] if node]
2354
2354
2355 if copy is None:
2355 if copy is None:
2356 copy = {}
2356 copy = {}
2357 if opts.git or opts.upgrade:
2357 if opts.git or opts.upgrade:
2358 copy = copies.pathcopies(ctx1, ctx2, match=match)
2358 copy = copies.pathcopies(ctx1, ctx2, match=match)
2359
2359
2360 if copysourcematch:
2360 if copysourcematch:
2361 # filter out copies where source side isn't inside the matcher
2361 # filter out copies where source side isn't inside the matcher
2362 # (copies.pathcopies() already filtered out the destination)
2362 # (copies.pathcopies() already filtered out the destination)
2363 copy = {dst: src for dst, src in copy.iteritems()
2363 copy = {dst: src for dst, src in copy.iteritems()
2364 if copysourcematch(src)}
2364 if copysourcematch(src)}
2365
2365
2366 modifiedset = set(modified)
2366 modifiedset = set(modified)
2367 addedset = set(added)
2367 addedset = set(added)
2368 removedset = set(removed)
2368 removedset = set(removed)
2369 for f in modified:
2369 for f in modified:
2370 if f not in ctx1:
2370 if f not in ctx1:
2371 # Fix up added, since merged-in additions appear as
2371 # Fix up added, since merged-in additions appear as
2372 # modifications during merges
2372 # modifications during merges
2373 modifiedset.remove(f)
2373 modifiedset.remove(f)
2374 addedset.add(f)
2374 addedset.add(f)
2375 for f in removed:
2375 for f in removed:
2376 if f not in ctx1:
2376 if f not in ctx1:
2377 # Merged-in additions that are then removed are reported as removed.
2377 # Merged-in additions that are then removed are reported as removed.
2378 # They are not in ctx1, so we don't want to show them in the diff.
2378 # They are not in ctx1, so we don't want to show them in the diff.
2379 removedset.remove(f)
2379 removedset.remove(f)
2380 modified = sorted(modifiedset)
2380 modified = sorted(modifiedset)
2381 added = sorted(addedset)
2381 added = sorted(addedset)
2382 removed = sorted(removedset)
2382 removed = sorted(removedset)
2383 for dst, src in list(copy.items()):
2383 for dst, src in list(copy.items()):
2384 if src not in ctx1:
2384 if src not in ctx1:
2385 # Files merged in during a merge and then copied/renamed are
2385 # Files merged in during a merge and then copied/renamed are
2386 # reported as copies. We want to show them in the diff as additions.
2386 # reported as copies. We want to show them in the diff as additions.
2387 del copy[dst]
2387 del copy[dst]
2388
2388
2389 prefetchmatch = scmutil.matchfiles(
2389 prefetchmatch = scmutil.matchfiles(
2390 repo, list(modifiedset | addedset | removedset))
2390 repo, list(modifiedset | addedset | removedset))
2391 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2391 scmutil.prefetchfiles(repo, [ctx1.rev(), ctx2.rev()], prefetchmatch)
2392
2392
2393 def difffn(opts, losedata):
2393 def difffn(opts, losedata):
2394 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2394 return trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2395 copy, getfilectx, opts, losedata, pathfn)
2395 copy, getfilectx, opts, losedata, pathfn)
2396 if opts.upgrade and not opts.git:
2396 if opts.upgrade and not opts.git:
2397 try:
2397 try:
2398 def losedata(fn):
2398 def losedata(fn):
2399 if not losedatafn or not losedatafn(fn=fn):
2399 if not losedatafn or not losedatafn(fn=fn):
2400 raise GitDiffRequired
2400 raise GitDiffRequired
2401 # Buffer the whole output until we are sure it can be generated
2401 # Buffer the whole output until we are sure it can be generated
2402 return list(difffn(opts.copy(git=False), losedata))
2402 return list(difffn(opts.copy(git=False), losedata))
2403 except GitDiffRequired:
2403 except GitDiffRequired:
2404 return difffn(opts.copy(git=True), None)
2404 return difffn(opts.copy(git=True), None)
2405 else:
2405 else:
2406 return difffn(opts, None)
2406 return difffn(opts, None)
2407
2407
2408 def diffsinglehunk(hunklines):
2408 def diffsinglehunk(hunklines):
2409 """yield tokens for a list of lines in a single hunk"""
2409 """yield tokens for a list of lines in a single hunk"""
2410 for line in hunklines:
2410 for line in hunklines:
2411 # chomp
2411 # chomp
2412 chompline = line.rstrip('\r\n')
2412 chompline = line.rstrip('\r\n')
2413 # highlight tabs and trailing whitespace
2413 # highlight tabs and trailing whitespace
2414 stripline = chompline.rstrip()
2414 stripline = chompline.rstrip()
2415 if line.startswith('-'):
2415 if line.startswith('-'):
2416 label = 'diff.deleted'
2416 label = 'diff.deleted'
2417 elif line.startswith('+'):
2417 elif line.startswith('+'):
2418 label = 'diff.inserted'
2418 label = 'diff.inserted'
2419 else:
2419 else:
2420 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2420 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2421 for token in tabsplitter.findall(stripline):
2421 for token in tabsplitter.findall(stripline):
2422 if token.startswith('\t'):
2422 if token.startswith('\t'):
2423 yield (token, 'diff.tab')
2423 yield (token, 'diff.tab')
2424 else:
2424 else:
2425 yield (token, label)
2425 yield (token, label)
2426
2426
2427 if chompline != stripline:
2427 if chompline != stripline:
2428 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2428 yield (chompline[len(stripline):], 'diff.trailingwhitespace')
2429 if chompline != line:
2429 if chompline != line:
2430 yield (line[len(chompline):], '')
2430 yield (line[len(chompline):], '')
2431
2431
2432 def diffsinglehunkinline(hunklines):
2432 def diffsinglehunkinline(hunklines):
2433 """yield tokens for a list of lines in a single hunk, with inline colors"""
2433 """yield tokens for a list of lines in a single hunk, with inline colors"""
2434 # prepare deleted, and inserted content
2434 # prepare deleted, and inserted content
2435 a = ''
2435 a = ''
2436 b = ''
2436 b = ''
2437 for line in hunklines:
2437 for line in hunklines:
2438 if line[0:1] == '-':
2438 if line[0:1] == '-':
2439 a += line[1:]
2439 a += line[1:]
2440 elif line[0:1] == '+':
2440 elif line[0:1] == '+':
2441 b += line[1:]
2441 b += line[1:]
2442 else:
2442 else:
2443 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2443 raise error.ProgrammingError('unexpected hunk line: %s' % line)
2444 # fast path: if either side is empty, use diffsinglehunk
2444 # fast path: if either side is empty, use diffsinglehunk
2445 if not a or not b:
2445 if not a or not b:
2446 for t in diffsinglehunk(hunklines):
2446 for t in diffsinglehunk(hunklines):
2447 yield t
2447 yield t
2448 return
2448 return
2449 # re-split the content into words
2449 # re-split the content into words
2450 al = wordsplitter.findall(a)
2450 al = wordsplitter.findall(a)
2451 bl = wordsplitter.findall(b)
2451 bl = wordsplitter.findall(b)
2452 # re-arrange the words to lines since the diff algorithm is line-based
2452 # re-arrange the words to lines since the diff algorithm is line-based
2453 aln = [s if s == '\n' else s + '\n' for s in al]
2453 aln = [s if s == '\n' else s + '\n' for s in al]
2454 bln = [s if s == '\n' else s + '\n' for s in bl]
2454 bln = [s if s == '\n' else s + '\n' for s in bl]
2455 an = ''.join(aln)
2455 an = ''.join(aln)
2456 bn = ''.join(bln)
2456 bn = ''.join(bln)
2457 # run the diff algorithm, prepare atokens and btokens
2457 # run the diff algorithm, prepare atokens and btokens
2458 atokens = []
2458 atokens = []
2459 btokens = []
2459 btokens = []
2460 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2460 blocks = mdiff.allblocks(an, bn, lines1=aln, lines2=bln)
2461 for (a1, a2, b1, b2), btype in blocks:
2461 for (a1, a2, b1, b2), btype in blocks:
2462 changed = btype == '!'
2462 changed = btype == '!'
2463 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2463 for token in mdiff.splitnewlines(''.join(al[a1:a2])):
2464 atokens.append((changed, token))
2464 atokens.append((changed, token))
2465 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2465 for token in mdiff.splitnewlines(''.join(bl[b1:b2])):
2466 btokens.append((changed, token))
2466 btokens.append((changed, token))
2467
2467
2468 # yield deleted tokens, then inserted ones
2468 # yield deleted tokens, then inserted ones
2469 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2469 for prefix, label, tokens in [('-', 'diff.deleted', atokens),
2470 ('+', 'diff.inserted', btokens)]:
2470 ('+', 'diff.inserted', btokens)]:
2471 nextisnewline = True
2471 nextisnewline = True
2472 for changed, token in tokens:
2472 for changed, token in tokens:
2473 if nextisnewline:
2473 if nextisnewline:
2474 yield (prefix, label)
2474 yield (prefix, label)
2475 nextisnewline = False
2475 nextisnewline = False
2476 # special handling for the end of the line
2476 # special handling for the end of the line
2477 isendofline = token.endswith('\n')
2477 isendofline = token.endswith('\n')
2478 if isendofline:
2478 if isendofline:
2479 chomp = token[:-1] # chomp
2479 chomp = token[:-1] # chomp
2480 if chomp.endswith('\r'):
2480 if chomp.endswith('\r'):
2481 chomp = chomp[:-1]
2481 chomp = chomp[:-1]
2482 endofline = token[len(chomp):]
2482 endofline = token[len(chomp):]
2483 token = chomp.rstrip() # detect spaces at the end
2483 token = chomp.rstrip() # detect spaces at the end
2484 endspaces = chomp[len(token):]
2484 endspaces = chomp[len(token):]
2485 # scan tabs
2485 # scan tabs
2486 for maybetab in tabsplitter.findall(token):
2486 for maybetab in tabsplitter.findall(token):
2487 if b'\t' == maybetab[0:1]:
2487 if b'\t' == maybetab[0:1]:
2488 currentlabel = 'diff.tab'
2488 currentlabel = 'diff.tab'
2489 else:
2489 else:
2490 if changed:
2490 if changed:
2491 currentlabel = label + '.changed'
2491 currentlabel = label + '.changed'
2492 else:
2492 else:
2493 currentlabel = label + '.unchanged'
2493 currentlabel = label + '.unchanged'
2494 yield (maybetab, currentlabel)
2494 yield (maybetab, currentlabel)
2495 if isendofline:
2495 if isendofline:
2496 if endspaces:
2496 if endspaces:
2497 yield (endspaces, 'diff.trailingwhitespace')
2497 yield (endspaces, 'diff.trailingwhitespace')
2498 yield (endofline, '')
2498 yield (endofline, '')
2499 nextisnewline = True
2499 nextisnewline = True
2500
2500
2501 def difflabel(func, *args, **kw):
2501 def difflabel(func, *args, **kw):
2502 '''yields 2-tuples of (output, label) based on the output of func()'''
2502 '''yields 2-tuples of (output, label) based on the output of func()'''
2503 if kw.get(r'opts') and kw[r'opts'].worddiff:
2503 if kw.get(r'opts') and kw[r'opts'].worddiff:
2504 dodiffhunk = diffsinglehunkinline
2504 dodiffhunk = diffsinglehunkinline
2505 else:
2505 else:
2506 dodiffhunk = diffsinglehunk
2506 dodiffhunk = diffsinglehunk
2507 headprefixes = [('diff', 'diff.diffline'),
2507 headprefixes = [('diff', 'diff.diffline'),
2508 ('copy', 'diff.extended'),
2508 ('copy', 'diff.extended'),
2509 ('rename', 'diff.extended'),
2509 ('rename', 'diff.extended'),
2510 ('old', 'diff.extended'),
2510 ('old', 'diff.extended'),
2511 ('new', 'diff.extended'),
2511 ('new', 'diff.extended'),
2512 ('deleted', 'diff.extended'),
2512 ('deleted', 'diff.extended'),
2513 ('index', 'diff.extended'),
2513 ('index', 'diff.extended'),
2514 ('similarity', 'diff.extended'),
2514 ('similarity', 'diff.extended'),
2515 ('---', 'diff.file_a'),
2515 ('---', 'diff.file_a'),
2516 ('+++', 'diff.file_b')]
2516 ('+++', 'diff.file_b')]
2517 textprefixes = [('@', 'diff.hunk'),
2517 textprefixes = [('@', 'diff.hunk'),
2518 # - and + are handled by diffsinglehunk
2518 # - and + are handled by diffsinglehunk
2519 ]
2519 ]
2520 head = False
2520 head = False
2521
2521
2522 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2522 # buffers a hunk, i.e. adjacent "-", "+" lines without other changes.
2523 hunkbuffer = []
2523 hunkbuffer = []
2524 def consumehunkbuffer():
2524 def consumehunkbuffer():
2525 if hunkbuffer:
2525 if hunkbuffer:
2526 for token in dodiffhunk(hunkbuffer):
2526 for token in dodiffhunk(hunkbuffer):
2527 yield token
2527 yield token
2528 hunkbuffer[:] = []
2528 hunkbuffer[:] = []
2529
2529
2530 for chunk in func(*args, **kw):
2530 for chunk in func(*args, **kw):
2531 lines = chunk.split('\n')
2531 lines = chunk.split('\n')
2532 linecount = len(lines)
2532 linecount = len(lines)
2533 for i, line in enumerate(lines):
2533 for i, line in enumerate(lines):
2534 if head:
2534 if head:
2535 if line.startswith('@'):
2535 if line.startswith('@'):
2536 head = False
2536 head = False
2537 else:
2537 else:
2538 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2538 if line and not line.startswith((' ', '+', '-', '@', '\\')):
2539 head = True
2539 head = True
2540 diffline = False
2540 diffline = False
2541 if not head and line and line.startswith(('+', '-')):
2541 if not head and line and line.startswith(('+', '-')):
2542 diffline = True
2542 diffline = True
2543
2543
2544 prefixes = textprefixes
2544 prefixes = textprefixes
2545 if head:
2545 if head:
2546 prefixes = headprefixes
2546 prefixes = headprefixes
2547 if diffline:
2547 if diffline:
2548 # buffered
2548 # buffered
2549 bufferedline = line
2549 bufferedline = line
2550 if i + 1 < linecount:
2550 if i + 1 < linecount:
2551 bufferedline += "\n"
2551 bufferedline += "\n"
2552 hunkbuffer.append(bufferedline)
2552 hunkbuffer.append(bufferedline)
2553 else:
2553 else:
2554 # unbuffered
2554 # unbuffered
2555 for token in consumehunkbuffer():
2555 for token in consumehunkbuffer():
2556 yield token
2556 yield token
2557 stripline = line.rstrip()
2557 stripline = line.rstrip()
2558 for prefix, label in prefixes:
2558 for prefix, label in prefixes:
2559 if stripline.startswith(prefix):
2559 if stripline.startswith(prefix):
2560 yield (stripline, label)
2560 yield (stripline, label)
2561 if line != stripline:
2561 if line != stripline:
2562 yield (line[len(stripline):],
2562 yield (line[len(stripline):],
2563 'diff.trailingwhitespace')
2563 'diff.trailingwhitespace')
2564 break
2564 break
2565 else:
2565 else:
2566 yield (line, '')
2566 yield (line, '')
2567 if i + 1 < linecount:
2567 if i + 1 < linecount:
2568 yield ('\n', '')
2568 yield ('\n', '')
2569 for token in consumehunkbuffer():
2569 for token in consumehunkbuffer():
2570 yield token
2570 yield token
2571
2571
2572 def diffui(*args, **kw):
2572 def diffui(*args, **kw):
2573 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2573 '''like diff(), but yields 2-tuples of (output, label) for ui.write()'''
2574 return difflabel(diff, *args, **kw)
2574 return difflabel(diff, *args, **kw)
2575
2575
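# diffui() pairs each chunk of diff() with a color label, so a caller can
# stream it straight to the ui. Illustrative sketch with assumed arguments:
#
#   for output, label in diffui(repo, node1, node2, opts=opts):
#       ui.write(output, label=label)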
2576 def _filepairs(modified, added, removed, copy, opts):
2576 def _filepairs(modified, added, removed, copy, opts):
2577 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2577 '''generates tuples (f1, f2, copyop), where f1 is the name of the file
2578 before and f2 is the name after. For added files, f1 will be None,
2578 before and f2 is the name after. For added files, f1 will be None,
2579 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2579 and for removed files, f2 will be None. copyop may be set to None, 'copy'
2580 or 'rename' (the latter two only if opts.git is set).'''
2580 or 'rename' (the latter two only if opts.git is set).'''
2581 gone = set()
2581 gone = set()
2582
2582
2583 copyto = dict([(v, k) for k, v in copy.items()])
2583 copyto = dict([(v, k) for k, v in copy.items()])
2584
2584
2585 addedset, removedset = set(added), set(removed)
2585 addedset, removedset = set(added), set(removed)
2586
2586
2587 for f in sorted(modified + added + removed):
2587 for f in sorted(modified + added + removed):
2588 copyop = None
2588 copyop = None
2589 f1, f2 = f, f
2589 f1, f2 = f, f
2590 if f in addedset:
2590 if f in addedset:
2591 f1 = None
2591 f1 = None
2592 if f in copy:
2592 if f in copy:
2593 if opts.git:
2593 if opts.git:
2594 f1 = copy[f]
2594 f1 = copy[f]
2595 if f1 in removedset and f1 not in gone:
2595 if f1 in removedset and f1 not in gone:
2596 copyop = 'rename'
2596 copyop = 'rename'
2597 gone.add(f1)
2597 gone.add(f1)
2598 else:
2598 else:
2599 copyop = 'copy'
2599 copyop = 'copy'
2600 elif f in removedset:
2600 elif f in removedset:
2601 f2 = None
2601 f2 = None
2602 if opts.git:
2602 if opts.git:
2603 # have we already reported a copy above?
2603 # have we already reported a copy above?
2604 if (f in copyto and copyto[f] in addedset
2604 if (f in copyto and copyto[f] in addedset
2605 and copy[copyto[f]] == f):
2605 and copy[copyto[f]] == f):
2606 continue
2606 continue
2607 yield f1, f2, copyop
2607 yield f1, f2, copyop
2608
2608
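# Example of the pairing above (illustrative): with a git-style opts object,
# a removed source that was copied to an added destination is reported once
# as a rename rather than as separate add/remove entries:
#
#   _filepairs(['m'], ['b'], ['a'], {'b': 'a'}, opts)   # opts.git is True
#   # yields ('a', 'b', 'rename'), then ('m', 'm', None)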
2609 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2609 def trydiff(repo, revs, ctx1, ctx2, modified, added, removed,
2610 copy, getfilectx, opts, losedatafn, pathfn):
2610 copy, getfilectx, opts, losedatafn, pathfn):
2611 '''given input data, generate a diff and yield it in blocks
2611 '''given input data, generate a diff and yield it in blocks
2612
2612
2613 If generating a diff would lose data like flags or binary data and
2613 If generating a diff would lose data like flags or binary data and
2614 losedatafn is not None, it will be called.
2614 losedatafn is not None, it will be called.
2615
2615
2616 pathfn is applied to every path in the diff output.
2616 pathfn is applied to every path in the diff output.
2617 '''
2617 '''
2618
2618
2619 def gitindex(text):
2619 def gitindex(text):
2620 if not text:
2620 if not text:
2621 text = ""
2621 text = ""
2622 l = len(text)
2622 l = len(text)
2623 s = hashlib.sha1('blob %d\0' % l)
2623 s = hashlib.sha1('blob %d\0' % l)
2624 s.update(text)
2624 s.update(text)
2625 return hex(s.digest())
2625 return hex(s.digest())
2626
2626
2627 if opts.noprefix:
2627 if opts.noprefix:
2628 aprefix = bprefix = ''
2628 aprefix = bprefix = ''
2629 else:
2629 else:
2630 aprefix = 'a/'
2630 aprefix = 'a/'
2631 bprefix = 'b/'
2631 bprefix = 'b/'
2632
2632
2633 def diffline(f, revs):
2633 def diffline(f, revs):
2634 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2634 revinfo = ' '.join(["-r %s" % rev for rev in revs])
2635 return 'diff %s %s' % (revinfo, f)
2635 return 'diff %s %s' % (revinfo, f)
2636
2636
2637 def isempty(fctx):
2637 def isempty(fctx):
2638 return fctx is None or fctx.size() == 0
2638 return fctx is None or fctx.size() == 0
2639
2639
2640 date1 = dateutil.datestr(ctx1.date())
2640 date1 = dateutil.datestr(ctx1.date())
2641 date2 = dateutil.datestr(ctx2.date())
2641 date2 = dateutil.datestr(ctx2.date())
2642
2642
2643 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2643 gitmode = {'l': '120000', 'x': '100755', '': '100644'}
2644
2644
2645 if not pathfn:
2645 if not pathfn:
2646 pathfn = lambda f: f
2646 pathfn = lambda f: f
2647
2647
2648 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2648 for f1, f2, copyop in _filepairs(modified, added, removed, copy, opts):
2649 content1 = None
2649 content1 = None
2650 content2 = None
2650 content2 = None
2651 fctx1 = None
2651 fctx1 = None
2652 fctx2 = None
2652 fctx2 = None
2653 flag1 = None
2653 flag1 = None
2654 flag2 = None
2654 flag2 = None
2655 if f1:
2655 if f1:
2656 fctx1 = getfilectx(f1, ctx1)
2656 fctx1 = getfilectx(f1, ctx1)
2657 if opts.git or losedatafn:
2657 if opts.git or losedatafn:
2658 flag1 = ctx1.flags(f1)
2658 flag1 = ctx1.flags(f1)
2659 if f2:
2659 if f2:
2660 fctx2 = getfilectx(f2, ctx2)
2660 fctx2 = getfilectx(f2, ctx2)
2661 if opts.git or losedatafn:
2661 if opts.git or losedatafn:
2662 flag2 = ctx2.flags(f2)
2662 flag2 = ctx2.flags(f2)
2663 # if binary is True, output "summary" or "base85", but not "text diff"
2663 # if binary is True, output "summary" or "base85", but not "text diff"
2664 if opts.text:
2664 if opts.text:
2665 binary = False
2665 binary = False
2666 else:
2666 else:
2667 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2667 binary = any(f.isbinary() for f in [fctx1, fctx2] if f is not None)
2668
2668
2669 if losedatafn and not opts.git:
2669 if losedatafn and not opts.git:
2670 if (binary or
2670 if (binary or
2671 # copy/rename
2671 # copy/rename
2672 f2 in copy or
2672 f2 in copy or
2673 # empty file creation
2673 # empty file creation
2674 (not f1 and isempty(fctx2)) or
2674 (not f1 and isempty(fctx2)) or
2675 # empty file deletion
2675 # empty file deletion
2676 (isempty(fctx1) and not f2) or
2676 (isempty(fctx1) and not f2) or
2677 # create with flags
2677 # create with flags
2678 (not f1 and flag2) or
2678 (not f1 and flag2) or
2679 # change flags
2679 # change flags
2680 (f1 and f2 and flag1 != flag2)):
2680 (f1 and f2 and flag1 != flag2)):
2681 losedatafn(f2 or f1)
2681 losedatafn(f2 or f1)
2682
2682
2683 path1 = pathfn(f1 or f2)
2683 path1 = pathfn(f1 or f2)
2684 path2 = pathfn(f2 or f1)
2684 path2 = pathfn(f2 or f1)
2685 header = []
2685 header = []
2686 if opts.git:
2686 if opts.git:
2687 header.append('diff --git %s%s %s%s' %
2687 header.append('diff --git %s%s %s%s' %
2688 (aprefix, path1, bprefix, path2))
2688 (aprefix, path1, bprefix, path2))
2689 if not f1: # added
2689 if not f1: # added
2690 header.append('new file mode %s' % gitmode[flag2])
2690 header.append('new file mode %s' % gitmode[flag2])
2691 elif not f2: # removed
2691 elif not f2: # removed
2692 header.append('deleted file mode %s' % gitmode[flag1])
2692 header.append('deleted file mode %s' % gitmode[flag1])
2693 else: # modified/copied/renamed
2693 else: # modified/copied/renamed
2694 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2694 mode1, mode2 = gitmode[flag1], gitmode[flag2]
2695 if mode1 != mode2:
2695 if mode1 != mode2:
2696 header.append('old mode %s' % mode1)
2696 header.append('old mode %s' % mode1)
2697 header.append('new mode %s' % mode2)
2697 header.append('new mode %s' % mode2)
2698 if copyop is not None:
2698 if copyop is not None:
2699 if opts.showsimilarity:
2699 if opts.showsimilarity:
2700 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2700 sim = similar.score(ctx1[path1], ctx2[path2]) * 100
2701 header.append('similarity index %d%%' % sim)
2701 header.append('similarity index %d%%' % sim)
2702 header.append('%s from %s' % (copyop, path1))
2702 header.append('%s from %s' % (copyop, path1))
2703 header.append('%s to %s' % (copyop, path2))
2703 header.append('%s to %s' % (copyop, path2))
2704 elif revs:
2704 elif revs:
2705 header.append(diffline(path1, revs))
2705 header.append(diffline(path1, revs))
2706
2706
2707 # fctx.is | diffopts | what to | is fctx.data()
2707 # fctx.is | diffopts | what to | is fctx.data()
2708 # binary() | text nobinary git index | output? | outputted?
2708 # binary() | text nobinary git index | output? | outputted?
2709 # ------------------------------------|----------------------------
2709 # ------------------------------------|----------------------------
2710 # yes | no no no * | summary | no
2710 # yes | no no no * | summary | no
2711 # yes | no no yes * | base85 | yes
2711 # yes | no no yes * | base85 | yes
2712 # yes | no yes no * | summary | no
2712 # yes | no yes no * | summary | no
2713 # yes | no yes yes 0 | summary | no
2713 # yes | no yes yes 0 | summary | no
2714 # yes | no yes yes >0 | summary | semi [1]
2714 # yes | no yes yes >0 | summary | semi [1]
2715 # yes | yes * * * | text diff | yes
2715 # yes | yes * * * | text diff | yes
2716 # no | * * * * | text diff | yes
2716 # no | * * * * | text diff | yes
2717 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2717 # [1]: hash(fctx.data()) is output, so fctx.data() cannot be faked
2718 if binary and (not opts.git or (opts.git and opts.nobinary and not
2718 if binary and (not opts.git or (opts.git and opts.nobinary and not
2719 opts.index)):
2719 opts.index)):
2720 # fast path: no binary content will be displayed, content1 and
2720 # fast path: no binary content will be displayed, content1 and
2721 # content2 are only used for equivalent test. cmp() could have a
2721 # content2 are only used for equivalent test. cmp() could have a
2722 # fast path.
2722 # fast path.
2723 if fctx1 is not None:
2723 if fctx1 is not None:
2724 content1 = b'\0'
2724 content1 = b'\0'
2725 if fctx2 is not None:
2725 if fctx2 is not None:
2726 if fctx1 is not None and not fctx1.cmp(fctx2):
2726 if fctx1 is not None and not fctx1.cmp(fctx2):
2727 content2 = b'\0' # not different
2727 content2 = b'\0' # not different
2728 else:
2728 else:
2729 content2 = b'\0\0'
2729 content2 = b'\0\0'
2730 else:
2730 else:
2731 # normal path: load contents
2731 # normal path: load contents
2732 if fctx1 is not None:
2732 if fctx1 is not None:
2733 content1 = fctx1.data()
2733 content1 = fctx1.data()
2734 if fctx2 is not None:
2734 if fctx2 is not None:
2735 content2 = fctx2.data()
2735 content2 = fctx2.data()
2736
2736
2737 if binary and opts.git and not opts.nobinary:
2737 if binary and opts.git and not opts.nobinary:
2738 text = mdiff.b85diff(content1, content2)
2738 text = mdiff.b85diff(content1, content2)
2739 if text:
2739 if text:
2740 header.append('index %s..%s' %
2740 header.append('index %s..%s' %
2741 (gitindex(content1), gitindex(content2)))
2741 (gitindex(content1), gitindex(content2)))
2742 hunks = (None, [text]),
2742 hunks = (None, [text]),
2743 else:
2743 else:
2744 if opts.git and opts.index > 0:
2744 if opts.git and opts.index > 0:
2745 flag = flag1
2745 flag = flag1
2746 if flag is None:
2746 if flag is None:
2747 flag = flag2
2747 flag = flag2
2748 header.append('index %s..%s %s' %
2748 header.append('index %s..%s %s' %
2749 (gitindex(content1)[0:opts.index],
2749 (gitindex(content1)[0:opts.index],
2750 gitindex(content2)[0:opts.index],
2750 gitindex(content2)[0:opts.index],
2751 gitmode[flag]))
2751 gitmode[flag]))
2752
2752
2753 uheaders, hunks = mdiff.unidiff(content1, date1,
2753 uheaders, hunks = mdiff.unidiff(content1, date1,
2754 content2, date2,
2754 content2, date2,
2755 path1, path2,
2755 path1, path2,
2756 binary=binary, opts=opts)
2756 binary=binary, opts=opts)
2757 header.extend(uheaders)
2757 header.extend(uheaders)
2758 yield fctx1, fctx2, header, hunks
2758 yield fctx1, fctx2, header, hunks
2759
2759
2760 def diffstatsum(stats):
2760 def diffstatsum(stats):
2761 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2761 maxfile, maxtotal, addtotal, removetotal, binary = 0, 0, 0, 0, False
2762 for f, a, r, b in stats:
2762 for f, a, r, b in stats:
2763 maxfile = max(maxfile, encoding.colwidth(f))
2763 maxfile = max(maxfile, encoding.colwidth(f))
2764 maxtotal = max(maxtotal, a + r)
2764 maxtotal = max(maxtotal, a + r)
2765 addtotal += a
2765 addtotal += a
2766 removetotal += r
2766 removetotal += r
2767 binary = binary or b
2767 binary = binary or b
2768
2768
2769 return maxfile, maxtotal, addtotal, removetotal, binary
2769 return maxfile, maxtotal, addtotal, removetotal, binary
2770
2770
2771 def diffstatdata(lines):
2771 def diffstatdata(lines):
2772 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2772 diffre = re.compile(br'^diff .*-r [a-z0-9]+\s(.*)$')
2773
2773
2774 results = []
2774 results = []
2775 filename, adds, removes, isbinary = None, 0, 0, False
2775 filename, adds, removes, isbinary = None, 0, 0, False
2776
2776
2777 def addresult():
2777 def addresult():
2778 if filename:
2778 if filename:
2779 results.append((filename, adds, removes, isbinary))
2779 results.append((filename, adds, removes, isbinary))
2780
2780
2781 # inheader is used to track if a line is in the
2781 # inheader is used to track if a line is in the
2782 # header portion of the diff. This helps properly account
2782 # header portion of the diff. This helps properly account
2783 # for lines that start with '--' or '++'
2783 # for lines that start with '--' or '++'
2784 inheader = False
2784 inheader = False
2785
2785
2786 for line in lines:
2786 for line in lines:
2787 if line.startswith('diff'):
2787 if line.startswith('diff'):
2788 addresult()
2788 addresult()
2789 # starting a new file diff
2789 # starting a new file diff
2790 # set numbers to 0 and reset inheader
2790 # set numbers to 0 and reset inheader
2791 inheader = True
2791 inheader = True
2792 adds, removes, isbinary = 0, 0, False
2792 adds, removes, isbinary = 0, 0, False
2793 if line.startswith('diff --git a/'):
2793 if line.startswith('diff --git a/'):
2794 filename = gitre.search(line).group(2)
2794 filename = gitre.search(line).group(2)
2795 elif line.startswith('diff -r'):
2795 elif line.startswith('diff -r'):
2796 # format: "diff -r ... -r ... filename"
2796 # format: "diff -r ... -r ... filename"
2797 filename = diffre.search(line).group(1)
2797 filename = diffre.search(line).group(1)
2798 elif line.startswith('@@'):
2798 elif line.startswith('@@'):
2799 inheader = False
2799 inheader = False
2800 elif line.startswith('+') and not inheader:
2800 elif line.startswith('+') and not inheader:
2801 adds += 1
2801 adds += 1
2802 elif line.startswith('-') and not inheader:
2802 elif line.startswith('-') and not inheader:
2803 removes += 1
2803 removes += 1
2804 elif (line.startswith('GIT binary patch') or
2804 elif (line.startswith('GIT binary patch') or
2805 line.startswith('Binary file')):
2805 line.startswith('Binary file')):
2806 isbinary = True
2806 isbinary = True
2807 elif line.startswith('rename from'):
2807 elif line.startswith('rename from'):
2808 filename = line[12:]
2808 filename = line[12:]
2809 elif line.startswith('rename to'):
2809 elif line.startswith('rename to'):
2810 filename += ' => %s' % line[10:]
2810 filename += ' => %s' % line[10:]
2811 addresult()
2811 addresult()
2812 return results
2812 return results
2813
2813
2814 def diffstat(lines, width=80):
2814 def diffstat(lines, width=80):
2815 output = []
2815 output = []
2816 stats = diffstatdata(lines)
2816 stats = diffstatdata(lines)
2817 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2817 maxname, maxtotal, totaladds, totalremoves, hasbinary = diffstatsum(stats)
2818
2818
2819 countwidth = len(str(maxtotal))
2819 countwidth = len(str(maxtotal))
2820 if hasbinary and countwidth < 3:
2820 if hasbinary and countwidth < 3:
2821 countwidth = 3
2821 countwidth = 3
2822 graphwidth = width - countwidth - maxname - 6
2822 graphwidth = width - countwidth - maxname - 6
2823 if graphwidth < 10:
2823 if graphwidth < 10:
2824 graphwidth = 10
2824 graphwidth = 10
2825
2825
2826 def scale(i):
2826 def scale(i):
2827 if maxtotal <= graphwidth:
2827 if maxtotal <= graphwidth:
2828 return i
2828 return i
2829 # If diffstat runs out of room it doesn't print anything,
2829 # If diffstat runs out of room it doesn't print anything,
2830 # which isn't very useful, so always print at least one + or -
2830 # which isn't very useful, so always print at least one + or -
2831 # if there were at least some changes.
2831 # if there were at least some changes.
2832 return max(i * graphwidth // maxtotal, int(bool(i)))
2832 return max(i * graphwidth // maxtotal, int(bool(i)))
2833
2833
2834 for filename, adds, removes, isbinary in stats:
2834 for filename, adds, removes, isbinary in stats:
2835 if isbinary:
2835 if isbinary:
2836 count = 'Bin'
2836 count = 'Bin'
2837 else:
2837 else:
2838 count = '%d' % (adds + removes)
2838 count = '%d' % (adds + removes)
2839 pluses = '+' * scale(adds)
2839 pluses = '+' * scale(adds)
2840 minuses = '-' * scale(removes)
2840 minuses = '-' * scale(removes)
2841 output.append(' %s%s | %*s %s%s\n' %
2841 output.append(' %s%s | %*s %s%s\n' %
2842 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2842 (filename, ' ' * (maxname - encoding.colwidth(filename)),
2843 countwidth, count, pluses, minuses))
2843 countwidth, count, pluses, minuses))
2844
2844
2845 if stats:
2845 if stats:
2846 output.append(_(' %d files changed, %d insertions(+), '
2846 output.append(_(' %d files changed, %d insertions(+), '
2847 '%d deletions(-)\n')
2847 '%d deletions(-)\n')
2848 % (len(stats), totaladds, totalremoves))
2848 % (len(stats), totaladds, totalremoves))
2849
2849
2850 return ''.join(output)
2850 return ''.join(output)
2851
2851
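# Rough illustration of diffstatdata()/diffstat() on the plain "diff -r"
# header form handled above (the hash and filename are invented):
#
#   lines = [b'diff -r 000000000000 f',
#            b'--- a/f', b'+++ b/f', b'@@ -1,1 +1,2 @@',
#            b'-old', b'+new', b'+new two']
#   diffstatdata(lines)   # -> [('f', 2, 1, False)]
#   # diffstat(lines) renders this as " f | 3 ++-" plus a summary line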
2852 def diffstatui(*args, **kw):
2852 def diffstatui(*args, **kw):
2853 '''like diffstat(), but yields 2-tuples of (output, label) for
2853 '''like diffstat(), but yields 2-tuples of (output, label) for
2854 ui.write()
2854 ui.write()
2855 '''
2855 '''
2856
2856
2857 for line in diffstat(*args, **kw).splitlines():
2857 for line in diffstat(*args, **kw).splitlines():
2858 if line and line[-1] in '+-':
2858 if line and line[-1] in '+-':
2859 name, graph = line.rsplit(' ', 1)
2859 name, graph = line.rsplit(' ', 1)
2860 yield (name + ' ', '')
2860 yield (name + ' ', '')
2861 m = re.search(br'\++', graph)
2861 m = re.search(br'\++', graph)
2862 if m:
2862 if m:
2863 yield (m.group(0), 'diffstat.inserted')
2863 yield (m.group(0), 'diffstat.inserted')
2864 m = re.search(br'-+', graph)
2864 m = re.search(br'-+', graph)
2865 if m:
2865 if m:
2866 yield (m.group(0), 'diffstat.deleted')
2866 yield (m.group(0), 'diffstat.deleted')
2867 else:
2867 else:
2868 yield (line, '')
2868 yield (line, '')
2869 yield ('\n', '')
2869 yield ('\n', '')
@@ -1,888 +1,978 b''
1 #testcases obsstore-on obsstore-off
1 #testcases obsstore-on obsstore-off
2
2
3 $ cat > $TESTTMP/editor.py <<EOF
3 $ cat > $TESTTMP/editor.py <<EOF
4 > #!"$PYTHON"
4 > #!"$PYTHON"
5 > import os
5 > import os
6 > import sys
6 > import sys
7 > path = os.path.join(os.environ['TESTTMP'], 'messages')
7 > path = os.path.join(os.environ['TESTTMP'], 'messages')
8 > messages = open(path).read().split('--\n')
8 > messages = open(path).read().split('--\n')
9 > prompt = open(sys.argv[1]).read()
9 > prompt = open(sys.argv[1]).read()
10 > sys.stdout.write(''.join('EDITOR: %s' % l for l in prompt.splitlines(True)))
10 > sys.stdout.write(''.join('EDITOR: %s' % l for l in prompt.splitlines(True)))
11 > sys.stdout.flush()
11 > sys.stdout.flush()
12 > with open(sys.argv[1], 'w') as f:
12 > with open(sys.argv[1], 'w') as f:
13 > f.write(messages[0])
13 > f.write(messages[0])
14 > with open(path, 'w') as f:
14 > with open(path, 'w') as f:
15 > f.write('--\n'.join(messages[1:]))
15 > f.write('--\n'.join(messages[1:]))
16 > EOF
16 > EOF
17
17
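The editor script above reads $TESTTMP/messages: it echoes the prompt prefixed
with "EDITOR: ", uses the first '--'-separated entry as the commit text, and
writes the remaining entries back. A test therefore queues its messages up
front, roughly like this (illustrative placeholders only):

  $ cat > $TESTTMP/messages <<EOF
  > message for the first split commit
  > --
  > message for the second split commit
  > EOF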
18 $ cat >> $HGRCPATH <<EOF
18 $ cat >> $HGRCPATH <<EOF
19 > [extensions]
19 > [extensions]
20 > drawdag=$TESTDIR/drawdag.py
20 > drawdag=$TESTDIR/drawdag.py
21 > split=
21 > split=
22 > [ui]
22 > [ui]
23 > interactive=1
23 > interactive=1
24 > color=no
24 > color=no
25 > paginate=never
25 > paginate=never
26 > [diff]
26 > [diff]
27 > git=1
27 > git=1
28 > unified=0
28 > unified=0
29 > [commands]
29 > [commands]
30 > commit.interactive.unified=0
30 > commit.interactive.unified=0
31 > [alias]
31 > [alias]
32 > glog=log -G -T '{rev}:{node|short} {desc} {bookmarks}\n'
32 > glog=log -G -T '{rev}:{node|short} {desc} {bookmarks}\n'
33 > EOF
33 > EOF
34
34
35 #if obsstore-on
35 #if obsstore-on
36 $ cat >> $HGRCPATH <<EOF
36 $ cat >> $HGRCPATH <<EOF
37 > [experimental]
37 > [experimental]
38 > evolution=all
38 > evolution=all
39 > EOF
39 > EOF
40 #endif
40 #endif
41
41
42 $ hg init a
42 $ hg init a
43 $ cd a
43 $ cd a
44
44
45 Nothing to split
45 Nothing to split
46
46
47 $ hg split
47 $ hg split
48 nothing to split
48 nothing to split
49 [1]
49 [1]
50
50
51 $ hg commit -m empty --config ui.allowemptycommit=1
51 $ hg commit -m empty --config ui.allowemptycommit=1
52 $ hg split
52 $ hg split
53 abort: cannot split an empty revision
53 abort: cannot split an empty revision
54 [255]
54 [255]
55
55
56 $ rm -rf .hg
56 $ rm -rf .hg
57 $ hg init
57 $ hg init
58
58
59 Cannot split working directory
59 Cannot split working directory
60
60
61 $ hg split -r 'wdir()'
61 $ hg split -r 'wdir()'
62 abort: cannot split working directory
62 abort: cannot split working directory
63 [255]
63 [255]
64
64
65 Generate some content. The sed filter drops the CR on Windows; the CR is dropped in
65 Generate some content. The sed filter drops the CR on Windows; the CR is dropped in
66 the a > b line.
66 the a > b line.
67
67
68 $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
68 $ $TESTDIR/seq.py 1 5 | sed 's/\r$//' >> a
69 $ hg ci -m a1 -A a -q
69 $ hg ci -m a1 -A a -q
70 $ hg bookmark -i r1
70 $ hg bookmark -i r1
71 $ sed 's/1/11/;s/3/33/;s/5/55/' a > b
71 $ sed 's/1/11/;s/3/33/;s/5/55/' a > b
72 $ mv b a
72 $ mv b a
73 $ hg ci -m a2 -q
73 $ hg ci -m a2 -q
74 $ hg bookmark -i r2
74 $ hg bookmark -i r2
75
75
76 Cannot split a public changeset
76 Cannot split a public changeset
77
77
78 $ hg phase --public -r 'all()'
78 $ hg phase --public -r 'all()'
79 $ hg split .
79 $ hg split .
80 abort: cannot split public changeset
80 abort: cannot split public changeset
81 (see 'hg help phases' for details)
81 (see 'hg help phases' for details)
82 [255]
82 [255]
83
83
84 $ hg phase --draft -f -r 'all()'
84 $ hg phase --draft -f -r 'all()'
85
85
86 Cannot split while working directory is dirty
86 Cannot split while working directory is dirty
87
87
88 $ touch dirty
88 $ touch dirty
89 $ hg add dirty
89 $ hg add dirty
90 $ hg split .
90 $ hg split .
91 abort: uncommitted changes
91 abort: uncommitted changes
92 [255]
92 [255]
93 $ hg forget dirty
93 $ hg forget dirty
94 $ rm dirty
94 $ rm dirty
95
95
96 Make a clean directory for future tests to build off of
96 Make a clean directory for future tests to build off of
97
97
98 $ cp -R . ../clean
98 $ cp -R . ../clean
99
99
100 Split a head
100 Split a head
101
101
102 $ hg bookmark r3
102 $ hg bookmark r3
103
103
104 $ hg split 'all()'
104 $ hg split 'all()'
105 abort: cannot split multiple revisions
105 abort: cannot split multiple revisions
106 [255]
106 [255]
107
107
108 This function splits in a slightly odd way, primarily to avoid changing the test's
108 This function splits in a slightly odd way, primarily to avoid changing the test's
109 behavior after a bug was fixed in how split/commit --interactive handled
109 behavior after a bug was fixed in how split/commit --interactive handled
110 `commands.commit.interactive.unified=0`: when there were no context lines,
110 `commands.commit.interactive.unified=0`: when there were no context lines,
111 only the last diff hunk was kept. When running split, this meant that runsplit
111 only the last diff hunk was kept. When running split, this meant that runsplit
112 always recorded three commits, one for each diff hunk, in reverse order
112 always recorded three commits, one for each diff hunk, in reverse order
113 (the base commit came from the last diff hunk in the file).
113 (the base commit came from the last diff hunk in the file).
114 $ runsplit() {
114 $ runsplit() {
115 > cat > $TESTTMP/messages <<EOF
115 > cat > $TESTTMP/messages <<EOF
116 > split 1
116 > split 1
117 > --
117 > --
118 > split 2
118 > split 2
119 > --
119 > --
120 > split 3
120 > split 3
121 > EOF
121 > EOF
122 > cat <<EOF | hg split "$@"
122 > cat <<EOF | hg split "$@"
123 > y
123 > y
124 > n
124 > n
125 > n
125 > n
126 > y
126 > y
127 > y
127 > y
128 > n
128 > n
129 > y
129 > y
130 > y
130 > y
131 > y
131 > y
132 > EOF
132 > EOF
133 > }
133 > }
134
134
135 $ HGEDITOR=false runsplit
135 $ HGEDITOR=false runsplit
136 diff --git a/a b/a
136 diff --git a/a b/a
137 3 hunks, 3 lines changed
137 3 hunks, 3 lines changed
138 examine changes to 'a'?
138 examine changes to 'a'?
139 (enter ? for help) [Ynesfdaq?] y
139 (enter ? for help) [Ynesfdaq?] y
140
140
141 @@ -1,1 +1,1 @@
141 @@ -1,1 +1,1 @@
142 -1
142 -1
143 +11
143 +11
144 record change 1/3 to 'a'?
144 record change 1/3 to 'a'?
145 (enter ? for help) [Ynesfdaq?] n
145 (enter ? for help) [Ynesfdaq?] n
146
146
147 @@ -3,1 +3,1 @@ 2
147 @@ -3,1 +3,1 @@ 2
148 -3
148 -3
149 +33
149 +33
150 record change 2/3 to 'a'?
150 record change 2/3 to 'a'?
151 (enter ? for help) [Ynesfdaq?] n
151 (enter ? for help) [Ynesfdaq?] n
152
152
153 @@ -5,1 +5,1 @@ 4
153 @@ -5,1 +5,1 @@ 4
154 -5
154 -5
155 +55
155 +55
156 record change 3/3 to 'a'?
156 record change 3/3 to 'a'?
157 (enter ? for help) [Ynesfdaq?] y
157 (enter ? for help) [Ynesfdaq?] y
158
158
159 transaction abort!
159 transaction abort!
160 rollback completed
160 rollback completed
161 abort: edit failed: false exited with status 1
161 abort: edit failed: false exited with status 1
162 [255]
162 [255]
163 $ hg status
163 $ hg status
164
164
165 $ HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py"
165 $ HGEDITOR="\"$PYTHON\" $TESTTMP/editor.py"
166 $ runsplit
166 $ runsplit
167 diff --git a/a b/a
167 diff --git a/a b/a
168 3 hunks, 3 lines changed
168 3 hunks, 3 lines changed
169 examine changes to 'a'?
169 examine changes to 'a'?
170 (enter ? for help) [Ynesfdaq?] y
170 (enter ? for help) [Ynesfdaq?] y
171
171
172 @@ -1,1 +1,1 @@
172 @@ -1,1 +1,1 @@
173 -1
173 -1
174 +11
174 +11
175 record change 1/3 to 'a'?
175 record change 1/3 to 'a'?
176 (enter ? for help) [Ynesfdaq?] n
176 (enter ? for help) [Ynesfdaq?] n
177
177
178 @@ -3,1 +3,1 @@ 2
178 @@ -3,1 +3,1 @@ 2
179 -3
179 -3
180 +33
180 +33
181 record change 2/3 to 'a'?
181 record change 2/3 to 'a'?
182 (enter ? for help) [Ynesfdaq?] n
182 (enter ? for help) [Ynesfdaq?] n
183
183
184 @@ -5,1 +5,1 @@ 4
184 @@ -5,1 +5,1 @@ 4
185 -5
185 -5
186 +55
186 +55
187 record change 3/3 to 'a'?
187 record change 3/3 to 'a'?
188 (enter ? for help) [Ynesfdaq?] y
188 (enter ? for help) [Ynesfdaq?] y
189
189
190 EDITOR: HG: Splitting 1df0d5c5a3ab. Write commit message for the first split changeset.
190 EDITOR: HG: Splitting 1df0d5c5a3ab. Write commit message for the first split changeset.
191 EDITOR: a2
191 EDITOR: a2
192 EDITOR:
192 EDITOR:
193 EDITOR:
193 EDITOR:
194 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
194 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
195 EDITOR: HG: Leave message empty to abort commit.
195 EDITOR: HG: Leave message empty to abort commit.
196 EDITOR: HG: --
196 EDITOR: HG: --
197 EDITOR: HG: user: test
197 EDITOR: HG: user: test
198 EDITOR: HG: branch 'default'
198 EDITOR: HG: branch 'default'
199 EDITOR: HG: changed a
199 EDITOR: HG: changed a
200 created new head
200 created new head
201 diff --git a/a b/a
201 diff --git a/a b/a
202 2 hunks, 2 lines changed
202 2 hunks, 2 lines changed
203 examine changes to 'a'?
203 examine changes to 'a'?
204 (enter ? for help) [Ynesfdaq?] y
204 (enter ? for help) [Ynesfdaq?] y
205
205
206 @@ -1,1 +1,1 @@
206 @@ -1,1 +1,1 @@
207 -1
207 -1
208 +11
208 +11
209 record change 1/2 to 'a'?
209 record change 1/2 to 'a'?
210 (enter ? for help) [Ynesfdaq?] n
210 (enter ? for help) [Ynesfdaq?] n
211
211
212 @@ -3,1 +3,1 @@ 2
212 @@ -3,1 +3,1 @@ 2
213 -3
213 -3
214 +33
214 +33
215 record change 2/2 to 'a'?
215 record change 2/2 to 'a'?
216 (enter ? for help) [Ynesfdaq?] y
216 (enter ? for help) [Ynesfdaq?] y
217
217
218 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
218 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
219 EDITOR: HG: - e704349bd21b: split 1
219 EDITOR: HG: - e704349bd21b: split 1
220 EDITOR: HG: Write commit message for the next split changeset.
220 EDITOR: HG: Write commit message for the next split changeset.
221 EDITOR: a2
221 EDITOR: a2
222 EDITOR:
222 EDITOR:
223 EDITOR:
223 EDITOR:
224 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
224 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
225 EDITOR: HG: Leave message empty to abort commit.
225 EDITOR: HG: Leave message empty to abort commit.
226 EDITOR: HG: --
226 EDITOR: HG: --
227 EDITOR: HG: user: test
227 EDITOR: HG: user: test
228 EDITOR: HG: branch 'default'
228 EDITOR: HG: branch 'default'
229 EDITOR: HG: changed a
229 EDITOR: HG: changed a
230 diff --git a/a b/a
230 diff --git a/a b/a
231 1 hunks, 1 lines changed
231 1 hunks, 1 lines changed
232 examine changes to 'a'?
232 examine changes to 'a'?
233 (enter ? for help) [Ynesfdaq?] y
233 (enter ? for help) [Ynesfdaq?] y
234
234
235 @@ -1,1 +1,1 @@
235 @@ -1,1 +1,1 @@
236 -1
236 -1
237 +11
237 +11
238 record this change to 'a'?
238 record this change to 'a'?
239 (enter ? for help) [Ynesfdaq?] y
239 (enter ? for help) [Ynesfdaq?] y
240
240
241 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
241 EDITOR: HG: Splitting 1df0d5c5a3ab. So far it has been split into:
242 EDITOR: HG: - e704349bd21b: split 1
242 EDITOR: HG: - e704349bd21b: split 1
243 EDITOR: HG: - a09ad58faae3: split 2
243 EDITOR: HG: - a09ad58faae3: split 2
244 EDITOR: HG: Write commit message for the next split changeset.
244 EDITOR: HG: Write commit message for the next split changeset.
245 EDITOR: a2
245 EDITOR: a2
246 EDITOR:
246 EDITOR:
247 EDITOR:
247 EDITOR:
248 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
248 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
249 EDITOR: HG: Leave message empty to abort commit.
249 EDITOR: HG: Leave message empty to abort commit.
250 EDITOR: HG: --
250 EDITOR: HG: --
251 EDITOR: HG: user: test
251 EDITOR: HG: user: test
252 EDITOR: HG: branch 'default'
252 EDITOR: HG: branch 'default'
253 EDITOR: HG: changed a
253 EDITOR: HG: changed a
254 saved backup bundle to $TESTTMP/a/.hg/strip-backup/1df0d5c5a3ab-8341b760-split.hg (obsstore-off !)
254 saved backup bundle to $TESTTMP/a/.hg/strip-backup/1df0d5c5a3ab-8341b760-split.hg (obsstore-off !)
255
255
256 #if obsstore-off
256 #if obsstore-off
257 $ hg bookmark
257 $ hg bookmark
258 r1 0:a61bcde8c529
258 r1 0:a61bcde8c529
259 r2 3:00eebaf8d2e2
259 r2 3:00eebaf8d2e2
260 * r3 3:00eebaf8d2e2
260 * r3 3:00eebaf8d2e2
261 $ hg glog -p
261 $ hg glog -p
262 @ 3:00eebaf8d2e2 split 3 r2 r3
262 @ 3:00eebaf8d2e2 split 3 r2 r3
263 | diff --git a/a b/a
263 | diff --git a/a b/a
264 | --- a/a
264 | --- a/a
265 | +++ b/a
265 | +++ b/a
266 | @@ -1,1 +1,1 @@
266 | @@ -1,1 +1,1 @@
267 | -1
267 | -1
268 | +11
268 | +11
269 |
269 |
270 o 2:a09ad58faae3 split 2
270 o 2:a09ad58faae3 split 2
271 | diff --git a/a b/a
271 | diff --git a/a b/a
272 | --- a/a
272 | --- a/a
273 | +++ b/a
273 | +++ b/a
274 | @@ -3,1 +3,1 @@
274 | @@ -3,1 +3,1 @@
275 | -3
275 | -3
276 | +33
276 | +33
277 |
277 |
278 o 1:e704349bd21b split 1
278 o 1:e704349bd21b split 1
279 | diff --git a/a b/a
279 | diff --git a/a b/a
280 | --- a/a
280 | --- a/a
281 | +++ b/a
281 | +++ b/a
282 | @@ -5,1 +5,1 @@
282 | @@ -5,1 +5,1 @@
283 | -5
283 | -5
284 | +55
284 | +55
285 |
285 |
286 o 0:a61bcde8c529 a1 r1
286 o 0:a61bcde8c529 a1 r1
287 diff --git a/a b/a
287 diff --git a/a b/a
288 new file mode 100644
288 new file mode 100644
289 --- /dev/null
289 --- /dev/null
290 +++ b/a
290 +++ b/a
291 @@ -0,0 +1,5 @@
291 @@ -0,0 +1,5 @@
292 +1
292 +1
293 +2
293 +2
294 +3
294 +3
295 +4
295 +4
296 +5
296 +5
297
297
298 #else
298 #else
299 $ hg bookmark
299 $ hg bookmark
300 r1 0:a61bcde8c529
300 r1 0:a61bcde8c529
301 r2 4:00eebaf8d2e2
301 r2 4:00eebaf8d2e2
302 * r3 4:00eebaf8d2e2
302 * r3 4:00eebaf8d2e2
303 $ hg glog
303 $ hg glog
304 @ 4:00eebaf8d2e2 split 3 r2 r3
304 @ 4:00eebaf8d2e2 split 3 r2 r3
305 |
305 |
306 o 3:a09ad58faae3 split 2
306 o 3:a09ad58faae3 split 2
307 |
307 |
308 o 2:e704349bd21b split 1
308 o 2:e704349bd21b split 1
309 |
309 |
310 o 0:a61bcde8c529 a1 r1
310 o 0:a61bcde8c529 a1 r1
311
311
312 #endif
312 #endif
313
313
314 Split a head while working parent is not that head
314 Split a head while working parent is not that head
315
315
316 $ cp -R $TESTTMP/clean $TESTTMP/b
316 $ cp -R $TESTTMP/clean $TESTTMP/b
317 $ cd $TESTTMP/b
317 $ cd $TESTTMP/b
318
318
319 $ hg up 0 -q
319 $ hg up 0 -q
320 $ hg bookmark r3
320 $ hg bookmark r3
321
321
322 $ runsplit tip >/dev/null
322 $ runsplit tip >/dev/null
323
323
324 #if obsstore-off
324 #if obsstore-off
325 $ hg bookmark
325 $ hg bookmark
326 r1 0:a61bcde8c529
326 r1 0:a61bcde8c529
327 r2 3:00eebaf8d2e2
327 r2 3:00eebaf8d2e2
328 * r3 0:a61bcde8c529
328 * r3 0:a61bcde8c529
329 $ hg glog
329 $ hg glog
330 o 3:00eebaf8d2e2 split 3 r2
330 o 3:00eebaf8d2e2 split 3 r2
331 |
331 |
332 o 2:a09ad58faae3 split 2
332 o 2:a09ad58faae3 split 2
333 |
333 |
334 o 1:e704349bd21b split 1
334 o 1:e704349bd21b split 1
335 |
335 |
336 @ 0:a61bcde8c529 a1 r1 r3
336 @ 0:a61bcde8c529 a1 r1 r3
337
337
338 #else
338 #else
339 $ hg bookmark
339 $ hg bookmark
340 r1 0:a61bcde8c529
340 r1 0:a61bcde8c529
341 r2 4:00eebaf8d2e2
341 r2 4:00eebaf8d2e2
342 * r3 0:a61bcde8c529
342 * r3 0:a61bcde8c529
343 $ hg glog
343 $ hg glog
344 o 4:00eebaf8d2e2 split 3 r2
344 o 4:00eebaf8d2e2 split 3 r2
345 |
345 |
346 o 3:a09ad58faae3 split 2
346 o 3:a09ad58faae3 split 2
347 |
347 |
348 o 2:e704349bd21b split 1
348 o 2:e704349bd21b split 1
349 |
349 |
350 @ 0:a61bcde8c529 a1 r1 r3
350 @ 0:a61bcde8c529 a1 r1 r3
351
351
352 #endif
352 #endif
353
353
354 Split a non-head
354 Split a non-head
355
355
356 $ cp -R $TESTTMP/clean $TESTTMP/c
356 $ cp -R $TESTTMP/clean $TESTTMP/c
357 $ cd $TESTTMP/c
357 $ cd $TESTTMP/c
358 $ echo d > d
358 $ echo d > d
359 $ hg ci -m d1 -A d
359 $ hg ci -m d1 -A d
360 $ hg bookmark -i d1
360 $ hg bookmark -i d1
361 $ echo 2 >> d
361 $ echo 2 >> d
362 $ hg ci -m d2
362 $ hg ci -m d2
363 $ echo 3 >> d
363 $ echo 3 >> d
364 $ hg ci -m d3
364 $ hg ci -m d3
365 $ hg bookmark -i d3
365 $ hg bookmark -i d3
366 $ hg up '.^' -q
366 $ hg up '.^' -q
367 $ hg bookmark d2
367 $ hg bookmark d2
368 $ cp -R . ../d
368 $ cp -R . ../d
369
369
370 $ runsplit -r 1 | grep rebasing
370 $ runsplit -r 1 | grep rebasing
371 rebasing 2:b5c5ea414030 "d1" (d1)
371 rebasing 2:b5c5ea414030 "d1" (d1)
372 rebasing 3:f4a0a8d004cc "d2" (d2)
372 rebasing 3:f4a0a8d004cc "d2" (d2)
373 rebasing 4:777940761eba "d3" (d3)
373 rebasing 4:777940761eba "d3" (d3)
374 #if obsstore-off
374 #if obsstore-off
375 $ hg bookmark
375 $ hg bookmark
376 d1 4:c4b449ef030e
376 d1 4:c4b449ef030e
377 * d2 5:c9dd00ab36a3
377 * d2 5:c9dd00ab36a3
378 d3 6:19f476bc865c
378 d3 6:19f476bc865c
379 r1 0:a61bcde8c529
379 r1 0:a61bcde8c529
380 r2 3:00eebaf8d2e2
380 r2 3:00eebaf8d2e2
381 $ hg glog -p
381 $ hg glog -p
382 o 6:19f476bc865c d3 d3
382 o 6:19f476bc865c d3 d3
383 | diff --git a/d b/d
383 | diff --git a/d b/d
384 | --- a/d
384 | --- a/d
385 | +++ b/d
385 | +++ b/d
386 | @@ -2,0 +3,1 @@
386 | @@ -2,0 +3,1 @@
387 | +3
387 | +3
388 |
388 |
389 @ 5:c9dd00ab36a3 d2 d2
389 @ 5:c9dd00ab36a3 d2 d2
390 | diff --git a/d b/d
390 | diff --git a/d b/d
391 | --- a/d
391 | --- a/d
392 | +++ b/d
392 | +++ b/d
393 | @@ -1,0 +2,1 @@
393 | @@ -1,0 +2,1 @@
394 | +2
394 | +2
395 |
395 |
396 o 4:c4b449ef030e d1 d1
396 o 4:c4b449ef030e d1 d1
397 | diff --git a/d b/d
397 | diff --git a/d b/d
398 | new file mode 100644
398 | new file mode 100644
399 | --- /dev/null
399 | --- /dev/null
400 | +++ b/d
400 | +++ b/d
401 | @@ -0,0 +1,1 @@
401 | @@ -0,0 +1,1 @@
402 | +d
402 | +d
403 |
403 |
404 o 3:00eebaf8d2e2 split 3 r2
404 o 3:00eebaf8d2e2 split 3 r2
405 | diff --git a/a b/a
405 | diff --git a/a b/a
406 | --- a/a
406 | --- a/a
407 | +++ b/a
407 | +++ b/a
408 | @@ -1,1 +1,1 @@
408 | @@ -1,1 +1,1 @@
409 | -1
409 | -1
410 | +11
410 | +11
411 |
411 |
412 o 2:a09ad58faae3 split 2
412 o 2:a09ad58faae3 split 2
413 | diff --git a/a b/a
413 | diff --git a/a b/a
414 | --- a/a
414 | --- a/a
415 | +++ b/a
415 | +++ b/a
416 | @@ -3,1 +3,1 @@
416 | @@ -3,1 +3,1 @@
417 | -3
417 | -3
418 | +33
418 | +33
419 |
419 |
420 o 1:e704349bd21b split 1
420 o 1:e704349bd21b split 1
421 | diff --git a/a b/a
421 | diff --git a/a b/a
422 | --- a/a
422 | --- a/a
423 | +++ b/a
423 | +++ b/a
424 | @@ -5,1 +5,1 @@
424 | @@ -5,1 +5,1 @@
425 | -5
425 | -5
426 | +55
426 | +55
427 |
427 |
428 o 0:a61bcde8c529 a1 r1
428 o 0:a61bcde8c529 a1 r1
429 diff --git a/a b/a
429 diff --git a/a b/a
430 new file mode 100644
430 new file mode 100644
431 --- /dev/null
431 --- /dev/null
432 +++ b/a
432 +++ b/a
433 @@ -0,0 +1,5 @@
433 @@ -0,0 +1,5 @@
434 +1
434 +1
435 +2
435 +2
436 +3
436 +3
437 +4
437 +4
438 +5
438 +5
439
439
440 #else
440 #else
441 $ hg bookmark
441 $ hg bookmark
442 d1 8:c4b449ef030e
442 d1 8:c4b449ef030e
443 * d2 9:c9dd00ab36a3
443 * d2 9:c9dd00ab36a3
444 d3 10:19f476bc865c
444 d3 10:19f476bc865c
445 r1 0:a61bcde8c529
445 r1 0:a61bcde8c529
446 r2 7:00eebaf8d2e2
446 r2 7:00eebaf8d2e2
447 $ hg glog
447 $ hg glog
448 o 10:19f476bc865c d3 d3
448 o 10:19f476bc865c d3 d3
449 |
449 |
450 @ 9:c9dd00ab36a3 d2 d2
450 @ 9:c9dd00ab36a3 d2 d2
451 |
451 |
452 o 8:c4b449ef030e d1 d1
452 o 8:c4b449ef030e d1 d1
453 |
453 |
454 o 7:00eebaf8d2e2 split 3 r2
454 o 7:00eebaf8d2e2 split 3 r2
455 |
455 |
456 o 6:a09ad58faae3 split 2
456 o 6:a09ad58faae3 split 2
457 |
457 |
458 o 5:e704349bd21b split 1
458 o 5:e704349bd21b split 1
459 |
459 |
460 o 0:a61bcde8c529 a1 r1
460 o 0:a61bcde8c529 a1 r1
461
461
462 #endif
462 #endif
463
463
464 Split a non-head without rebase
464 Split a non-head without rebase
465
465
466 $ cd $TESTTMP/d
466 $ cd $TESTTMP/d
467 #if obsstore-off
467 #if obsstore-off
468 $ runsplit -r 1 --no-rebase
468 $ runsplit -r 1 --no-rebase
469 abort: cannot split changeset with children without rebase
469 abort: cannot split changeset with children without rebase
470 [255]
470 [255]
471 #else
471 #else
472 $ runsplit -r 1 --no-rebase >/dev/null
472 $ runsplit -r 1 --no-rebase >/dev/null
473 3 new orphan changesets
473 3 new orphan changesets
474 $ hg bookmark
474 $ hg bookmark
475 d1 2:b5c5ea414030
475 d1 2:b5c5ea414030
476 * d2 3:f4a0a8d004cc
476 * d2 3:f4a0a8d004cc
477 d3 4:777940761eba
477 d3 4:777940761eba
478 r1 0:a61bcde8c529
478 r1 0:a61bcde8c529
479 r2 7:00eebaf8d2e2
479 r2 7:00eebaf8d2e2
480
480
481 $ hg glog
481 $ hg glog
482 o 7:00eebaf8d2e2 split 3 r2
482 o 7:00eebaf8d2e2 split 3 r2
483 |
483 |
484 o 6:a09ad58faae3 split 2
484 o 6:a09ad58faae3 split 2
485 |
485 |
486 o 5:e704349bd21b split 1
486 o 5:e704349bd21b split 1
487 |
487 |
488 | * 4:777940761eba d3 d3
488 | * 4:777940761eba d3 d3
489 | |
489 | |
490 | @ 3:f4a0a8d004cc d2 d2
490 | @ 3:f4a0a8d004cc d2 d2
491 | |
491 | |
492 | * 2:b5c5ea414030 d1 d1
492 | * 2:b5c5ea414030 d1 d1
493 | |
493 | |
494 | x 1:1df0d5c5a3ab a2
494 | x 1:1df0d5c5a3ab a2
495 |/
495 |/
496 o 0:a61bcde8c529 a1 r1
496 o 0:a61bcde8c529 a1 r1
497
497
498 #endif
498 #endif
499
499
500 Split a non-head with obsoleted descendants
500 Split a non-head with obsoleted descendants
501
501
502 #if obsstore-on
502 #if obsstore-on
503 $ hg init $TESTTMP/e
503 $ hg init $TESTTMP/e
504 $ cd $TESTTMP/e
504 $ cd $TESTTMP/e
505 $ hg debugdrawdag <<'EOS'
505 $ hg debugdrawdag <<'EOS'
506 > H I J
506 > H I J
507 > | | |
507 > | | |
508 > F G1 G2 # amend: G1 -> G2
508 > F G1 G2 # amend: G1 -> G2
509 > | | / # prune: F
509 > | | / # prune: F
510 > C D E
510 > C D E
511 > \|/
511 > \|/
512 > B
512 > B
513 > |
513 > |
514 > A
514 > A
515 > EOS
515 > EOS
516 2 new orphan changesets
516 2 new orphan changesets
517 $ eval `hg tags -T '{tag}={node}\n'`
517 $ eval `hg tags -T '{tag}={node}\n'`
518 $ rm .hg/localtags
518 $ rm .hg/localtags
519 $ hg split $B --config experimental.evolution=createmarkers
519 $ hg split $B --config experimental.evolution=createmarkers
520 abort: split would leave orphaned changesets behind
520 abort: split would leave orphaned changesets behind
521 [255]
521 [255]
522 $ cat > $TESTTMP/messages <<EOF
522 $ cat > $TESTTMP/messages <<EOF
523 > Split B
523 > Split B
524 > EOF
524 > EOF
525 $ cat <<EOF | hg split $B
525 $ cat <<EOF | hg split $B
526 > y
526 > y
527 > y
527 > y
528 > EOF
528 > EOF
529 diff --git a/B b/B
529 diff --git a/B b/B
530 new file mode 100644
530 new file mode 100644
531 examine changes to 'B'?
531 examine changes to 'B'?
532 (enter ? for help) [Ynesfdaq?] y
532 (enter ? for help) [Ynesfdaq?] y
533
533
534 @@ -0,0 +1,1 @@
534 @@ -0,0 +1,1 @@
535 +B
535 +B
536 \ No newline at end of file
536 \ No newline at end of file
537 record this change to 'B'?
537 record this change to 'B'?
538 (enter ? for help) [Ynesfdaq?] y
538 (enter ? for help) [Ynesfdaq?] y
539
539
540 EDITOR: HG: Splitting 112478962961. Write commit message for the first split changeset.
540 EDITOR: HG: Splitting 112478962961. Write commit message for the first split changeset.
541 EDITOR: B
541 EDITOR: B
542 EDITOR:
542 EDITOR:
543 EDITOR:
543 EDITOR:
544 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
544 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
545 EDITOR: HG: Leave message empty to abort commit.
545 EDITOR: HG: Leave message empty to abort commit.
546 EDITOR: HG: --
546 EDITOR: HG: --
547 EDITOR: HG: user: test
547 EDITOR: HG: user: test
548 EDITOR: HG: branch 'default'
548 EDITOR: HG: branch 'default'
549 EDITOR: HG: added B
549 EDITOR: HG: added B
550 created new head
550 created new head
551 rebasing 2:26805aba1e60 "C"
551 rebasing 2:26805aba1e60 "C"
552 rebasing 3:be0ef73c17ad "D"
552 rebasing 3:be0ef73c17ad "D"
553 rebasing 4:49cb92066bfd "E"
553 rebasing 4:49cb92066bfd "E"
554 rebasing 7:97a6268cc7ef "G2"
554 rebasing 7:97a6268cc7ef "G2"
555 rebasing 10:e2f1e425c0db "J"
555 rebasing 10:e2f1e425c0db "J"
556 $ hg glog -r 'sort(all(), topo)'
556 $ hg glog -r 'sort(all(), topo)'
557 o 16:556c085f8b52 J
557 o 16:556c085f8b52 J
558 |
558 |
559 o 15:8761f6c9123f G2
559 o 15:8761f6c9123f G2
560 |
560 |
561 o 14:a7aeffe59b65 E
561 o 14:a7aeffe59b65 E
562 |
562 |
563 | o 13:e1e914ede9ab D
563 | o 13:e1e914ede9ab D
564 |/
564 |/
565 | o 12:01947e9b98aa C
565 | o 12:01947e9b98aa C
566 |/
566 |/
567 o 11:0947baa74d47 Split B
567 o 11:0947baa74d47 Split B
568 |
568 |
569 | * 9:88ede1d5ee13 I
569 | * 9:88ede1d5ee13 I
570 | |
570 | |
571 | x 6:af8cbf225b7b G1
571 | x 6:af8cbf225b7b G1
572 | |
572 | |
573 | x 3:be0ef73c17ad D
573 | x 3:be0ef73c17ad D
574 | |
574 | |
575 | | * 8:74863e5b5074 H
575 | | * 8:74863e5b5074 H
576 | | |
576 | | |
577 | | x 5:ee481a2a1e69 F
577 | | x 5:ee481a2a1e69 F
578 | | |
578 | | |
579 | | x 2:26805aba1e60 C
579 | | x 2:26805aba1e60 C
580 | |/
580 | |/
581 | x 1:112478962961 B
581 | x 1:112478962961 B
582 |/
582 |/
583 o 0:426bada5c675 A
583 o 0:426bada5c675 A
584
584
585 #endif
585 #endif
586
586
587 Preserve secret phase in split
587 Preserve secret phase in split
588
588
589 $ cp -R $TESTTMP/clean $TESTTMP/phases1
589 $ cp -R $TESTTMP/clean $TESTTMP/phases1
590 $ cd $TESTTMP/phases1
590 $ cd $TESTTMP/phases1
591 $ hg phase --secret -fr tip
591 $ hg phase --secret -fr tip
592 $ hg log -T '{short(node)} {phase}\n'
592 $ hg log -T '{short(node)} {phase}\n'
593 1df0d5c5a3ab secret
593 1df0d5c5a3ab secret
594 a61bcde8c529 draft
594 a61bcde8c529 draft
595 $ runsplit tip >/dev/null
595 $ runsplit tip >/dev/null
596 $ hg log -T '{short(node)} {phase}\n'
596 $ hg log -T '{short(node)} {phase}\n'
597 00eebaf8d2e2 secret
597 00eebaf8d2e2 secret
598 a09ad58faae3 secret
598 a09ad58faae3 secret
599 e704349bd21b secret
599 e704349bd21b secret
600 a61bcde8c529 draft
600 a61bcde8c529 draft
601
601
602 Do not move things to secret even if phases.new-commit=secret
602 Do not move things to secret even if phases.new-commit=secret
603
603
604 $ cp -R $TESTTMP/clean $TESTTMP/phases2
604 $ cp -R $TESTTMP/clean $TESTTMP/phases2
605 $ cd $TESTTMP/phases2
605 $ cd $TESTTMP/phases2
606 $ cat >> .hg/hgrc <<EOF
606 $ cat >> .hg/hgrc <<EOF
607 > [phases]
607 > [phases]
608 > new-commit=secret
608 > new-commit=secret
609 > EOF
609 > EOF
610 $ hg log -T '{short(node)} {phase}\n'
610 $ hg log -T '{short(node)} {phase}\n'
611 1df0d5c5a3ab draft
611 1df0d5c5a3ab draft
612 a61bcde8c529 draft
612 a61bcde8c529 draft
613 $ runsplit tip >/dev/null
613 $ runsplit tip >/dev/null
614 $ hg log -T '{short(node)} {phase}\n'
614 $ hg log -T '{short(node)} {phase}\n'
615 00eebaf8d2e2 draft
615 00eebaf8d2e2 draft
616 a09ad58faae3 draft
616 a09ad58faae3 draft
617 e704349bd21b draft
617 e704349bd21b draft
618 a61bcde8c529 draft
618 a61bcde8c529 draft
619
619
620 `hg split` with ignoreblanklines=1 does not enter an infinite loop
620 `hg split` with ignoreblanklines=1 does not enter an infinite loop
621
621
622 $ mkdir $TESTTMP/f
622 $ mkdir $TESTTMP/f
623 $ hg init $TESTTMP/f/a
623 $ hg init $TESTTMP/f/a
624 $ cd $TESTTMP/f/a
624 $ cd $TESTTMP/f/a
625 $ printf '1\n2\n3\n4\n5\n' > foo
625 $ printf '1\n2\n3\n4\n5\n' > foo
626 $ cp foo bar
626 $ cp foo bar
627 $ hg ci -qAm initial
627 $ hg ci -qAm initial
628 $ printf '1\n\n2\n3\ntest\n4\n5\n' > bar
628 $ printf '1\n\n2\n3\ntest\n4\n5\n' > bar
629 $ printf '1\n2\n3\ntest\n4\n5\n' > foo
629 $ printf '1\n2\n3\ntest\n4\n5\n' > foo
630 $ hg ci -qm splitme
630 $ hg ci -qm splitme
631 $ cat > $TESTTMP/messages <<EOF
631 $ cat > $TESTTMP/messages <<EOF
632 > split 1
632 > split 1
633 > --
633 > --
634 > split 2
634 > split 2
635 > EOF
635 > EOF
636 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
636 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
637 diff --git a/bar b/bar
637 diff --git a/bar b/bar
638 2 hunks, 2 lines changed
638 2 hunks, 2 lines changed
639 examine changes to 'bar'?
639 examine changes to 'bar'?
640 (enter ? for help) [Ynesfdaq?] f
640 (enter ? for help) [Ynesfdaq?] f
641
641
642 diff --git a/foo b/foo
642 diff --git a/foo b/foo
643 1 hunks, 1 lines changed
643 1 hunks, 1 lines changed
644 examine changes to 'foo'?
644 examine changes to 'foo'?
645 (enter ? for help) [Ynesfdaq?] n
645 (enter ? for help) [Ynesfdaq?] n
646
646
647 EDITOR: HG: Splitting dd3c45017cbf. Write commit message for the first split changeset.
647 EDITOR: HG: Splitting dd3c45017cbf. Write commit message for the first split changeset.
648 EDITOR: splitme
648 EDITOR: splitme
649 EDITOR:
649 EDITOR:
650 EDITOR:
650 EDITOR:
651 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
651 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
652 EDITOR: HG: Leave message empty to abort commit.
652 EDITOR: HG: Leave message empty to abort commit.
653 EDITOR: HG: --
653 EDITOR: HG: --
654 EDITOR: HG: user: test
654 EDITOR: HG: user: test
655 EDITOR: HG: branch 'default'
655 EDITOR: HG: branch 'default'
656 EDITOR: HG: changed bar
656 EDITOR: HG: changed bar
657 created new head
657 created new head
658 diff --git a/foo b/foo
658 diff --git a/foo b/foo
659 1 hunks, 1 lines changed
659 1 hunks, 1 lines changed
660 examine changes to 'foo'?
660 examine changes to 'foo'?
661 (enter ? for help) [Ynesfdaq?] f
661 (enter ? for help) [Ynesfdaq?] f
662
662
663 EDITOR: HG: Splitting dd3c45017cbf. So far it has been split into:
663 EDITOR: HG: Splitting dd3c45017cbf. So far it has been split into:
664 EDITOR: HG: - f205aea1c624: split 1
664 EDITOR: HG: - f205aea1c624: split 1
665 EDITOR: HG: Write commit message for the next split changeset.
665 EDITOR: HG: Write commit message for the next split changeset.
666 EDITOR: splitme
666 EDITOR: splitme
667 EDITOR:
667 EDITOR:
668 EDITOR:
668 EDITOR:
669 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
669 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
670 EDITOR: HG: Leave message empty to abort commit.
670 EDITOR: HG: Leave message empty to abort commit.
671 EDITOR: HG: --
671 EDITOR: HG: --
672 EDITOR: HG: user: test
672 EDITOR: HG: user: test
673 EDITOR: HG: branch 'default'
673 EDITOR: HG: branch 'default'
674 EDITOR: HG: changed foo
674 EDITOR: HG: changed foo
675 saved backup bundle to $TESTTMP/f/a/.hg/strip-backup/dd3c45017cbf-463441b5-split.hg (obsstore-off !)
675 saved backup bundle to $TESTTMP/f/a/.hg/strip-backup/dd3c45017cbf-463441b5-split.hg (obsstore-off !)
676
676
677 Let's try that again, with a slightly different set of patches, to ensure that
677 Let's try that again, with a slightly different set of patches, to ensure that
678 the ignoreblanklines handling isn't somehow position-dependent.
678 the ignoreblanklines handling isn't somehow position-dependent.
679
679
680 $ hg init $TESTTMP/f/b
680 $ hg init $TESTTMP/f/b
681 $ cd $TESTTMP/f/b
681 $ cd $TESTTMP/f/b
682 $ printf '1\n2\n3\n4\n5\n' > foo
682 $ printf '1\n2\n3\n4\n5\n' > foo
683 $ cp foo bar
683 $ cp foo bar
684 $ hg ci -qAm initial
684 $ hg ci -qAm initial
685 $ printf '1\n2\n3\ntest\n4\n5\n' > bar
685 $ printf '1\n2\n3\ntest\n4\n5\n' > bar
686 $ printf '1\n2\n3\ntest\n4\n\n5\n' > foo
686 $ printf '1\n2\n3\ntest\n4\n\n5\n' > foo
687 $ hg ci -qm splitme
687 $ hg ci -qm splitme
688 $ cat > $TESTTMP/messages <<EOF
688 $ cat > $TESTTMP/messages <<EOF
689 > split 1
689 > split 1
690 > --
690 > --
691 > split 2
691 > split 2
692 > EOF
692 > EOF
693 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
693 $ printf 'f\nn\nf\n' | hg --config extensions.split= --config diff.ignoreblanklines=1 split
694 diff --git a/bar b/bar
694 diff --git a/bar b/bar
695 1 hunks, 1 lines changed
695 1 hunks, 1 lines changed
696 examine changes to 'bar'?
696 examine changes to 'bar'?
697 (enter ? for help) [Ynesfdaq?] f
697 (enter ? for help) [Ynesfdaq?] f
698
698
699 diff --git a/foo b/foo
699 diff --git a/foo b/foo
700 2 hunks, 2 lines changed
700 2 hunks, 2 lines changed
701 examine changes to 'foo'?
701 examine changes to 'foo'?
702 (enter ? for help) [Ynesfdaq?] n
702 (enter ? for help) [Ynesfdaq?] n
703
703
704 EDITOR: HG: Splitting 904c80b40a4a. Write commit message for the first split changeset.
704 EDITOR: HG: Splitting 904c80b40a4a. Write commit message for the first split changeset.
705 EDITOR: splitme
705 EDITOR: splitme
706 EDITOR:
706 EDITOR:
707 EDITOR:
707 EDITOR:
708 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
708 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
709 EDITOR: HG: Leave message empty to abort commit.
709 EDITOR: HG: Leave message empty to abort commit.
710 EDITOR: HG: --
710 EDITOR: HG: --
711 EDITOR: HG: user: test
711 EDITOR: HG: user: test
712 EDITOR: HG: branch 'default'
712 EDITOR: HG: branch 'default'
713 EDITOR: HG: changed bar
713 EDITOR: HG: changed bar
714 created new head
714 created new head
715 diff --git a/foo b/foo
715 diff --git a/foo b/foo
716 2 hunks, 2 lines changed
716 2 hunks, 2 lines changed
717 examine changes to 'foo'?
717 examine changes to 'foo'?
718 (enter ? for help) [Ynesfdaq?] f
718 (enter ? for help) [Ynesfdaq?] f
719
719
720 EDITOR: HG: Splitting 904c80b40a4a. So far it has been split into:
720 EDITOR: HG: Splitting 904c80b40a4a. So far it has been split into:
721 EDITOR: HG: - ffecf40fa954: split 1
721 EDITOR: HG: - ffecf40fa954: split 1
722 EDITOR: HG: Write commit message for the next split changeset.
722 EDITOR: HG: Write commit message for the next split changeset.
723 EDITOR: splitme
723 EDITOR: splitme
724 EDITOR:
724 EDITOR:
725 EDITOR:
725 EDITOR:
726 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
726 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
727 EDITOR: HG: Leave message empty to abort commit.
727 EDITOR: HG: Leave message empty to abort commit.
728 EDITOR: HG: --
728 EDITOR: HG: --
729 EDITOR: HG: user: test
729 EDITOR: HG: user: test
730 EDITOR: HG: branch 'default'
730 EDITOR: HG: branch 'default'
731 EDITOR: HG: changed foo
731 EDITOR: HG: changed foo
732 saved backup bundle to $TESTTMP/f/b/.hg/strip-backup/904c80b40a4a-47fb907f-split.hg (obsstore-off !)
732 saved backup bundle to $TESTTMP/f/b/.hg/strip-backup/904c80b40a4a-47fb907f-split.hg (obsstore-off !)
733
733
734
734
735 Testing the case in split when committing flag-only file changes (issue5864)
735 Testing the case in split when committing flag-only file changes (issue5864)
736 ---------------------------------------------------------------------------
736 ---------------------------------------------------------------------------
737 $ hg init $TESTTMP/issue5864
737 $ hg init $TESTTMP/issue5864
738 $ cd $TESTTMP/issue5864
738 $ cd $TESTTMP/issue5864
739 $ echo foo > foo
739 $ echo foo > foo
740 $ hg add foo
740 $ hg add foo
741 $ hg ci -m "initial"
741 $ hg ci -m "initial"
742 $ hg import -q --bypass -m "make executable" - <<EOF
742 $ hg import -q --bypass -m "make executable" - <<EOF
743 > diff --git a/foo b/foo
743 > diff --git a/foo b/foo
744 > old mode 100644
744 > old mode 100644
745 > new mode 100755
745 > new mode 100755
746 > EOF
746 > EOF
747 $ hg up -q
747 $ hg up -q
748
748
749 $ hg glog
749 $ hg glog
750 @ 1:3a2125f0f4cb make executable
750 @ 1:3a2125f0f4cb make executable
751 |
751 |
752 o 0:51f273a58d82 initial
752 o 0:51f273a58d82 initial
753
753
754
754
755 #if no-windows
755 #if no-windows
756 $ cat > $TESTTMP/messages <<EOF
756 $ cat > $TESTTMP/messages <<EOF
757 > split 1
757 > split 1
758 > EOF
758 > EOF
759 $ printf 'y\n' | hg split
759 $ printf 'y\n' | hg split
760 diff --git a/foo b/foo
760 diff --git a/foo b/foo
761 old mode 100644
761 old mode 100644
762 new mode 100755
762 new mode 100755
763 examine changes to 'foo'?
763 examine changes to 'foo'?
764 (enter ? for help) [Ynesfdaq?] y
764 (enter ? for help) [Ynesfdaq?] y
765
765
766 EDITOR: HG: Splitting 3a2125f0f4cb. Write commit message for the first split changeset.
766 EDITOR: HG: Splitting 3a2125f0f4cb. Write commit message for the first split changeset.
767 EDITOR: make executable
767 EDITOR: make executable
768 EDITOR:
768 EDITOR:
769 EDITOR:
769 EDITOR:
770 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
770 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
771 EDITOR: HG: Leave message empty to abort commit.
771 EDITOR: HG: Leave message empty to abort commit.
772 EDITOR: HG: --
772 EDITOR: HG: --
773 EDITOR: HG: user: test
773 EDITOR: HG: user: test
774 EDITOR: HG: branch 'default'
774 EDITOR: HG: branch 'default'
775 EDITOR: HG: changed foo
775 EDITOR: HG: changed foo
776 created new head
776 created new head
777 saved backup bundle to $TESTTMP/issue5864/.hg/strip-backup/3a2125f0f4cb-629e4432-split.hg (obsstore-off !)
777 saved backup bundle to $TESTTMP/issue5864/.hg/strip-backup/3a2125f0f4cb-629e4432-split.hg (obsstore-off !)
778
778
779 $ hg log -G -T "{node|short} {desc}\n"
779 $ hg log -G -T "{node|short} {desc}\n"
780 @ b154670c87da split 1
780 @ b154670c87da split 1
781 |
781 |
782 o 51f273a58d82 initial
782 o 51f273a58d82 initial
783
783
784 #else
784 #else
785
785
786 TODO: Fix this on Windows. See issues 2020 and 5883
786 TODO: Fix this on Windows. See issues 2020 and 5883
787
787
788 $ printf 'y\ny\ny\n' | hg split
788 $ printf 'y\ny\ny\n' | hg split
789 abort: cannot split an empty revision
789 abort: cannot split an empty revision
790 [255]
790 [255]
791 #endif
791 #endif
792
792
793 Test that splitting moves works properly (issue5723)
794 ----------------------------------------------------
795
796 $ hg init $TESTTMP/issue5723-mv
797 $ cd $TESTTMP/issue5723-mv
798 $ printf '1\n2\n' > file
799 $ hg ci -qAm initial
800 $ hg mv file file2
801 $ printf 'a\nb\n1\n2\n3\n4\n' > file2
802 $ cat > $TESTTMP/messages <<EOF
803 > split1, keeping only the numbered lines
804 > --
805 > split2, keeping the lettered lines
806 > EOF
807 $ hg ci -m 'move and modify'
808 $ printf 'y\nn\na\na\n' | hg split
809 diff --git a/file b/file2
810 rename from file
811 rename to file2
812 2 hunks, 4 lines changed
813 examine changes to 'file' and 'file2'?
814 (enter ? for help) [Ynesfdaq?] y
815
816 @@ -0,0 +1,2 @@
817 +a
818 +b
819 record change 1/2 to 'file2'?
820 (enter ? for help) [Ynesfdaq?] n
821
822 @@ -2,0 +5,2 @@ 2
823 +3
824 +4
825 record change 2/2 to 'file2'?
826 (enter ? for help) [Ynesfdaq?] a
827
828 EDITOR: HG: Splitting 8c42fa635116. Write commit message for the first split changeset.
829 EDITOR: move and modify
830 EDITOR:
831 EDITOR:
832 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
833 EDITOR: HG: Leave message empty to abort commit.
834 EDITOR: HG: --
835 EDITOR: HG: user: test
836 EDITOR: HG: branch 'default'
837 EDITOR: HG: added file2
838 EDITOR: HG: removed file
839 created new head
840 diff --git a/file2 b/file2
841 1 hunks, 2 lines changed
842 examine changes to 'file2'?
843 (enter ? for help) [Ynesfdaq?] a
844
845 EDITOR: HG: Splitting 8c42fa635116. So far it has been split into:
846 EDITOR: HG: - 478be2a70c27: split1, keeping only the numbered lines
847 EDITOR: HG: Write commit message for the next split changeset.
848 EDITOR: move and modify
849 EDITOR:
850 EDITOR:
851 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
852 EDITOR: HG: Leave message empty to abort commit.
853 EDITOR: HG: --
854 EDITOR: HG: user: test
855 EDITOR: HG: branch 'default'
856 EDITOR: HG: changed file2
857 saved backup bundle to $TESTTMP/issue5723-mv/.hg/strip-backup/8c42fa635116-a38044d4-split.hg (obsstore-off !)
858 $ hg log -T '{desc}: {files%"{file} "}\n'
859 split2, keeping the lettered lines: file2
860 split1, keeping only the numbered lines: file file2
861 initial: file
862 $ cat file2
863 a
864 b
865 1
866 2
867 3
868 4
869 $ hg cat -r ".^" file2
870 1
871 2
872 3
873 4
874 $ hg cat -r . file2
875 a
876 b
877 1
878 2
879 3
880 4
881
882
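A side note on verifying the result: besides the hg log / hg cat checks above,
the rename metadata recorded by the first split changeset could also be
inspected through Mercurial's Python API. The sketch below is a hypothetical
helper, not part of the test; it assumes the long-standing filectx.renamed()
API, which returns a (source path, filenode) pair when a copy/rename source was
recorded and a false value otherwise, plus scmutil.revsingle() for resolving a
revision specifier.

    from mercurial import hg, scmutil, ui as uimod

    def renamesource(repopath, revspec, path):
        # Open the repository, resolve the revision, and return the recorded
        # rename/copy source of `path` in that revision (None if not a copy).
        repo = hg.repository(uimod.ui.load(), repopath)
        ctx = scmutil.revsingle(repo, revspec)
        renamed = ctx[path].renamed()
        return renamed[0] if renamed else None

    # For the issue5723-mv repository built above one would expect, e.g.,
    # renamesource(b'.', b'.^', b'file2') to return b'file': the "split1"
    # changeset still records file2 as a rename of file.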
793 Test that splitting copies works properly (issue5723)
883 Test that splitting copies works properly (issue5723)
794 ----------------------------------------------------
884 ----------------------------------------------------
795
885
796 $ hg init $TESTTMP/issue5723-cp
886 $ hg init $TESTTMP/issue5723-cp
797 $ cd $TESTTMP/issue5723-cp
887 $ cd $TESTTMP/issue5723-cp
798 $ printf '1\n2\n' > file
888 $ printf '1\n2\n' > file
799 $ hg ci -qAm initial
889 $ hg ci -qAm initial
800 $ hg cp file file2
890 $ hg cp file file2
801 $ printf 'a\nb\n1\n2\n3\n4\n' > file2
891 $ printf 'a\nb\n1\n2\n3\n4\n' > file2
802 Also modify 'file' to prove that the changes aren't being pulled in
892 Also modify 'file' to prove that the changes aren't being pulled in
803 accidentally.
893 accidentally.
804 $ printf 'this is the new contents of "file"' > file
894 $ printf 'this is the new contents of "file"' > file
805 $ cat > $TESTTMP/messages <<EOF
895 $ cat > $TESTTMP/messages <<EOF
806 > split1, keeping "file" and only the numbered lines in file2
896 > split1, keeping "file" and only the numbered lines in file2
807 > --
897 > --
808 > split2, keeping the lettered lines in file2
898 > split2, keeping the lettered lines in file2
809 > EOF
899 > EOF
810 $ hg ci -m 'copy file->file2, modify both'
900 $ hg ci -m 'copy file->file2, modify both'
811 $ printf 'f\ny\nn\na\na\n' | hg split
901 $ printf 'f\ny\nn\na\na\n' | hg split
812 diff --git a/file b/file
902 diff --git a/file b/file
813 1 hunks, 2 lines changed
903 1 hunks, 2 lines changed
814 examine changes to 'file'?
904 examine changes to 'file'?
815 (enter ? for help) [Ynesfdaq?] f
905 (enter ? for help) [Ynesfdaq?] f
816
906
817 diff --git a/file b/file2
907 diff --git a/file b/file2
818 copy from file
908 copy from file
819 copy to file2
909 copy to file2
820 2 hunks, 4 lines changed
910 2 hunks, 4 lines changed
821 examine changes to 'file' and 'file2'?
911 examine changes to 'file' and 'file2'?
822 (enter ? for help) [Ynesfdaq?] y
912 (enter ? for help) [Ynesfdaq?] y
823
913
824 @@ -0,0 +1,2 @@
914 @@ -0,0 +1,2 @@
825 +a
915 +a
826 +b
916 +b
827 record change 2/3 to 'file2'?
917 record change 2/3 to 'file2'?
828 (enter ? for help) [Ynesfdaq?] n
918 (enter ? for help) [Ynesfdaq?] n
829
919
830 @@ -2,0 +5,2 @@ 2
920 @@ -2,0 +5,2 @@ 2
831 +3
921 +3
832 +4
922 +4
833 record change 3/3 to 'file2'?
923 record change 3/3 to 'file2'?
834 (enter ? for help) [Ynesfdaq?] a
924 (enter ? for help) [Ynesfdaq?] a
835
925
836 EDITOR: HG: Splitting 41c861dfa61e. Write commit message for the first split changeset.
926 EDITOR: HG: Splitting 41c861dfa61e. Write commit message for the first split changeset.
837 EDITOR: copy file->file2, modify both
927 EDITOR: copy file->file2, modify both
838 EDITOR:
928 EDITOR:
839 EDITOR:
929 EDITOR:
840 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
930 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
841 EDITOR: HG: Leave message empty to abort commit.
931 EDITOR: HG: Leave message empty to abort commit.
842 EDITOR: HG: --
932 EDITOR: HG: --
843 EDITOR: HG: user: test
933 EDITOR: HG: user: test
844 EDITOR: HG: branch 'default'
934 EDITOR: HG: branch 'default'
845 EDITOR: HG: added file2
935 EDITOR: HG: added file2
846 EDITOR: HG: changed file
936 EDITOR: HG: changed file
847 created new head
937 created new head
848 diff --git a/file2 b/file2
938 diff --git a/file2 b/file2
849 1 hunks, 2 lines changed
939 1 hunks, 2 lines changed
850 examine changes to 'file2'?
940 examine changes to 'file2'?
851 (enter ? for help) [Ynesfdaq?] a
941 (enter ? for help) [Ynesfdaq?] a
852
942
853 EDITOR: HG: Splitting 41c861dfa61e. So far it has been split into:
943 EDITOR: HG: Splitting 41c861dfa61e. So far it has been split into:
854 EDITOR: HG: - 4b19e06610eb: split1, keeping "file" and only the numbered lines in file2
944 EDITOR: HG: - 4b19e06610eb: split1, keeping "file" and only the numbered lines in file2
855 EDITOR: HG: Write commit message for the next split changeset.
945 EDITOR: HG: Write commit message for the next split changeset.
856 EDITOR: copy file->file2, modify both
946 EDITOR: copy file->file2, modify both
857 EDITOR:
947 EDITOR:
858 EDITOR:
948 EDITOR:
859 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
949 EDITOR: HG: Enter commit message. Lines beginning with 'HG:' are removed.
860 EDITOR: HG: Leave message empty to abort commit.
950 EDITOR: HG: Leave message empty to abort commit.
861 EDITOR: HG: --
951 EDITOR: HG: --
862 EDITOR: HG: user: test
952 EDITOR: HG: user: test
863 EDITOR: HG: branch 'default'
953 EDITOR: HG: branch 'default'
864 EDITOR: HG: changed file2
954 EDITOR: HG: changed file2
865 saved backup bundle to $TESTTMP/issue5723-cp/.hg/strip-backup/41c861dfa61e-467e8d3c-split.hg (obsstore-off !)
955 saved backup bundle to $TESTTMP/issue5723-cp/.hg/strip-backup/41c861dfa61e-467e8d3c-split.hg (obsstore-off !)
866 $ hg log -T '{desc}: {files%"{file} "}\n'
956 $ hg log -T '{desc}: {files%"{file} "}\n'
867 split2, keeping the lettered lines in file2: file2
957 split2, keeping the lettered lines in file2: file2
868 split1, keeping "file" and only the numbered lines in file2: file file2
958 split1, keeping "file" and only the numbered lines in file2: file file2
869 initial: file
959 initial: file
870 $ cat file2
960 $ cat file2
871 a
961 a
872 b
962 b
873 1
963 1
874 2
964 2
875 3
965 3
876 4
966 4
877 $ hg cat -r ".^" file2
967 $ hg cat -r ".^" file2
878 1
968 1
879 2
969 2
880 3
970 3
881 4
971 4
882 $ hg cat -r . file2
972 $ hg cat -r . file2
883 a
973 a
884 b
974 b
885 1
975 1
886 2
976 2
887 3
977 3
888 4
978 4