##// END OF EJS Templates
errors: raise InputError from revsingle() iff revset provided by the user...
Martin von Zweigbergk -
r48930:5105a997 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,119 +1,120 b''
1 1 # Copyright (C) 2015 - Mike Edgar <adgar@google.com>
2 2 #
3 3 # This extension enables removal of file content at a given revision,
4 4 # rewriting the data/metadata of successive revisions to preserve revision log
5 5 # integrity.
6 6
7 7 """erase file content at a given revision
8 8
9 9 The censor command instructs Mercurial to erase all content of a file at a given
10 10 revision *without updating the changeset hash.* This allows existing history to
11 11 remain valid while preventing future clones/pulls from receiving the erased
12 12 data.
13 13
14 14 Typical uses for censor are due to security or legal requirements, including::
15 15
16 16 * Passwords, private keys, cryptographic material
17 17 * Licensed data/code/libraries for which the license has expired
18 18 * Personally Identifiable Information or other private data
19 19
20 20 Censored nodes can interrupt mercurial's typical operation whenever the excised
21 21 data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
22 22 simply fail when asked to produce censored data. Others, like ``hg verify`` and
23 23 ``hg update``, must be capable of tolerating censored data to continue to
24 24 function in a meaningful way. Such commands only tolerate censored file
25 25 revisions if they are allowed by the "censor.policy=ignore" config option.
26 26
27 27 A few informative commands such as ``hg grep`` will unconditionally
28 28 ignore censored data and merely report that it was encountered.
29 29 """
30 30
31 31 from __future__ import absolute_import
32 32
33 33 from mercurial.i18n import _
34 34 from mercurial.node import short
35 35
36 36 from mercurial import (
37 37 error,
38 logcmdutil,
38 39 registrar,
39 40 scmutil,
40 41 )
41 42
42 43 cmdtable = {}
43 44 command = registrar.command(cmdtable)
44 45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 47 # be specifying the version(s) of Mercurial they are tested with, or
47 48 # leave the attribute unspecified.
48 49 testedwith = b'ships-with-hg-core'
49 50
50 51
51 52 @command(
52 53 b'censor',
53 54 [
54 55 (
55 56 b'r',
56 57 b'rev',
57 58 b'',
58 59 _(b'censor file from specified revision'),
59 60 _(b'REV'),
60 61 ),
61 62 (b't', b'tombstone', b'', _(b'replacement tombstone data'), _(b'TEXT')),
62 63 ],
63 64 _(b'-r REV [-t TEXT] [FILE]'),
64 65 helpcategory=command.CATEGORY_MAINTENANCE,
65 66 )
66 67 def censor(ui, repo, path, rev=b'', tombstone=b'', **opts):
67 68 with repo.wlock(), repo.lock():
68 69 return _docensor(ui, repo, path, rev, tombstone, **opts)
69 70
70 71
71 72 def _docensor(ui, repo, path, rev=b'', tombstone=b'', **opts):
72 73 if not path:
73 74 raise error.Abort(_(b'must specify file path to censor'))
74 75 if not rev:
75 76 raise error.Abort(_(b'must specify revision to censor'))
76 77
77 78 wctx = repo[None]
78 79
79 80 m = scmutil.match(wctx, (path,))
80 81 if m.anypats() or len(m.files()) != 1:
81 82 raise error.Abort(_(b'can only specify an explicit filename'))
82 83 path = m.files()[0]
83 84 flog = repo.file(path)
84 85 if not len(flog):
85 86 raise error.Abort(_(b'cannot censor file with no history'))
86 87
87 rev = scmutil.revsingle(repo, rev, rev).rev()
88 rev = logcmdutil.revsingle(repo, rev, rev).rev()
88 89 try:
89 90 ctx = repo[rev]
90 91 except KeyError:
91 92 raise error.Abort(_(b'invalid revision identifier %s') % rev)
92 93
93 94 try:
94 95 fctx = ctx.filectx(path)
95 96 except error.LookupError:
96 97 raise error.Abort(_(b'file does not exist at revision %s') % rev)
97 98
98 99 fnode = fctx.filenode()
99 100 heads = []
100 101 for headnode in repo.heads():
101 102 hc = repo[headnode]
102 103 if path in hc and hc.filenode(path) == fnode:
103 104 heads.append(hc)
104 105 if heads:
105 106 headlist = b', '.join([short(c.node()) for c in heads])
106 107 raise error.Abort(
107 108 _(b'cannot censor file in heads (%s)') % headlist,
108 109 hint=_(b'clean/delete and commit first'),
109 110 )
110 111
111 112 wp = wctx.parents()
112 113 if ctx.node() in [p.node() for p in wp]:
113 114 raise error.Abort(
114 115 _(b'cannot censor working directory'),
115 116 hint=_(b'clean/delete/update first'),
116 117 )
117 118
118 119 with repo.transaction(b'censor') as tr:
119 120 flog.censorrevision(tr, fnode, tombstone=tombstone)
@@ -1,84 +1,83 b''
1 1 # Mercurial extension to provide the 'hg children' command
2 2 #
3 3 # Copyright 2007 by Intevation GmbH <intevation@intevation.de>
4 4 #
5 5 # Author(s):
6 6 # Thomas Arendsen Hein <thomas@intevation.de>
7 7 #
8 8 # This software may be used and distributed according to the terms of the
9 9 # GNU General Public License version 2 or any later version.
10 10
11 11 '''command to display child changesets (DEPRECATED)
12 12
13 13 This extension is deprecated. You should use :hg:`log -r
14 14 "children(REV)"` instead.
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 from mercurial.i18n import _
20 20 from mercurial import (
21 21 cmdutil,
22 22 logcmdutil,
23 23 pycompat,
24 24 registrar,
25 scmutil,
26 25 )
27 26
28 27 templateopts = cmdutil.templateopts
29 28
30 29 cmdtable = {}
31 30 command = registrar.command(cmdtable)
32 31 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
33 32 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
34 33 # be specifying the version(s) of Mercurial they are tested with, or
35 34 # leave the attribute unspecified.
36 35 testedwith = b'ships-with-hg-core'
37 36
38 37
39 38 @command(
40 39 b'children',
41 40 [
42 41 (
43 42 b'r',
44 43 b'rev',
45 44 b'.',
46 45 _(b'show children of the specified revision'),
47 46 _(b'REV'),
48 47 ),
49 48 ]
50 49 + templateopts,
51 50 _(b'hg children [-r REV] [FILE]'),
52 51 helpcategory=command.CATEGORY_CHANGE_NAVIGATION,
53 52 inferrepo=True,
54 53 )
55 54 def children(ui, repo, file_=None, **opts):
56 55 """show the children of the given or working directory revision
57 56
58 57 Print the children of the working directory's revisions. If a
59 58 revision is given via -r/--rev, the children of that revision will
60 59 be printed. If a file argument is given, revision in which the
61 60 file was last changed (after the working directory revision or the
62 61 argument to --rev if given) is printed.
63 62
64 63 Please use :hg:`log` instead::
65 64
66 65 hg children => hg log -r "children(.)"
67 66 hg children -r REV => hg log -r "children(REV)"
68 67
69 68 See :hg:`help log` and :hg:`help revsets.children`.
70 69
71 70 """
72 71 opts = pycompat.byteskwargs(opts)
73 72 rev = opts.get(b'rev')
74 ctx = scmutil.revsingle(repo, rev)
73 ctx = logcmdutil.revsingle(repo, rev)
75 74 if file_:
76 75 fctx = repo.filectx(file_, changeid=ctx.rev())
77 76 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
78 77 else:
79 78 childctxs = ctx.children()
80 79
81 80 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
82 81 for cctx in childctxs:
83 82 displayer.show(cctx)
84 83 displayer.close()
@@ -1,95 +1,95 b''
1 1 # closehead.py - Close arbitrary heads without checking them out first
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 '''close arbitrary heads without checking them out first'''
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import (
12 12 bookmarks,
13 13 cmdutil,
14 14 context,
15 15 error,
16 logcmdutil,
16 17 pycompat,
17 18 registrar,
18 logcmdutil,
19 19 )
20 20
21 21 cmdtable = {}
22 22 command = registrar.command(cmdtable)
23 23 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
24 24 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
25 25 # be specifying the version(s) of Mercurial they are tested with, or
26 26 # leave the attribute unspecified.
27 27 testedwith = b'ships-with-hg-core'
28 28
29 29 commitopts = cmdutil.commitopts
30 30 commitopts2 = cmdutil.commitopts2
31 31 commitopts3 = [(b'r', b'rev', [], _(b'revision to check'), _(b'REV'))]
32 32
33 33
34 34 @command(
35 35 b'close-head|close-heads',
36 36 commitopts + commitopts2 + commitopts3,
37 37 _(b'[OPTION]... [REV]...'),
38 38 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
39 39 inferrepo=True,
40 40 )
41 41 def close_branch(ui, repo, *revs, **opts):
42 42 """close the given head revisions
43 43
44 44 This is equivalent to checking out each revision in a clean tree and running
45 45 ``hg commit --close-branch``, except that it doesn't change the working
46 46 directory.
47 47
48 48 The commit message must be specified with -l or -m.
49 49 """
50 50
51 51 def docommit(rev):
52 52 cctx = context.memctx(
53 53 repo,
54 54 parents=[rev, None],
55 55 text=message,
56 56 files=[],
57 57 filectxfn=None,
58 58 user=opts.get(b'user'),
59 59 date=opts.get(b'date'),
60 60 extra=extra,
61 61 )
62 62 tr = repo.transaction(b'commit')
63 63 ret = repo.commitctx(cctx, True)
64 64 bookmarks.update(repo, [rev, None], ret)
65 65 cctx.markcommitted(ret)
66 66 tr.close()
67 67
68 68 opts = pycompat.byteskwargs(opts)
69 69
70 70 revs += tuple(opts.get(b'rev', []))
71 71 revs = logcmdutil.revrange(repo, revs)
72 72
73 73 if not revs:
74 74 raise error.Abort(_(b'no revisions specified'))
75 75
76 76 heads = []
77 77 for branch in repo.branchmap():
78 78 heads.extend(repo.branchheads(branch))
79 79 heads = {repo[h].rev() for h in heads}
80 80 for rev in revs:
81 81 if rev not in heads:
82 82 raise error.Abort(_(b'revision is not an open head: %d') % rev)
83 83
84 84 message = cmdutil.logmessage(ui, opts)
85 85 if not message:
86 86 raise error.Abort(_(b"no commit message specified with -l or -m"))
87 87 extra = {b'close': b'1'}
88 88
89 89 with repo.wlock(), repo.lock():
90 90 for rev in revs:
91 91 r = repo[rev]
92 92 branch = r.branch()
93 93 extra[b'branch'] = branch
94 94 docommit(r)
95 95 return 0
@@ -1,804 +1,804 b''
1 1 # extdiff.py - external diff program support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to allow external programs to compare revisions
9 9
10 10 The extdiff Mercurial extension allows you to use external programs
11 11 to compare revisions, or revision with working directory. The external
12 12 diff programs are called with a configurable set of options and two
13 13 non-option arguments: paths to directories containing snapshots of
14 14 files to compare.
15 15
16 16 If there is more than one file being compared and the "child" revision
17 17 is the working directory, any modifications made in the external diff
18 18 program will be copied back to the working directory from the temporary
19 19 directory.
20 20
21 21 The extdiff extension also allows you to configure new diff commands, so
22 22 you do not need to type :hg:`extdiff -p kdiff3` always. ::
23 23
24 24 [extdiff]
25 25 # add new command that runs GNU diff(1) in 'context diff' mode
26 26 cdiff = gdiff -Nprc5
27 27 ## or the old way:
28 28 #cmd.cdiff = gdiff
29 29 #opts.cdiff = -Nprc5
30 30
31 31 # add new command called meld, runs meld (no need to name twice). If
32 32 # the meld executable is not available, the meld tool in [merge-tools]
33 33 # will be used, if available
34 34 meld =
35 35
36 36 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
37 37 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
38 38 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
39 39 # your .vimrc
40 40 vimdiff = gvim -f "+next" \\
41 41 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
42 42
43 43 Tool arguments can include variables that are expanded at runtime::
44 44
45 45 $parent1, $plabel1 - filename, descriptive label of first parent
46 46 $child, $clabel - filename, descriptive label of child revision
47 47 $parent2, $plabel2 - filename, descriptive label of second parent
48 48 $root - repository root
49 49 $parent is an alias for $parent1.
50 50
51 51 The extdiff extension will look in your [diff-tools] and [merge-tools]
52 52 sections for diff tool arguments, when none are specified in [extdiff].
53 53
54 54 ::
55 55
56 56 [extdiff]
57 57 kdiff3 =
58 58
59 59 [diff-tools]
60 60 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
61 61
62 62 If a program has a graphical interface, it might be interesting to tell
63 63 Mercurial about it. It will prevent the program from being mistakenly
64 64 used in a terminal-only environment (such as an SSH terminal session),
65 65 and will make :hg:`extdiff --per-file` open multiple file diffs at once
66 66 instead of one by one (if you still want to open file diffs one by one,
67 67 you can use the --confirm option).
68 68
69 69 Declaring that a tool has a graphical interface can be done with the
70 70 ``gui`` flag next to where ``diffargs`` are specified:
71 71
72 72 ::
73 73
74 74 [diff-tools]
75 75 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
76 76 kdiff3.gui = true
77 77
78 78 You can use -I/-X and list of file or directory names like normal
79 79 :hg:`diff` command. The extdiff extension makes snapshots of only
80 80 needed files, so running the external diff program will actually be
81 81 pretty fast (at least faster than having to compare the entire tree).
82 82 '''
83 83
84 84 from __future__ import absolute_import
85 85
86 86 import os
87 87 import re
88 88 import shutil
89 89 import stat
90 90 import subprocess
91 91
92 92 from mercurial.i18n import _
93 93 from mercurial.node import (
94 94 nullrev,
95 95 short,
96 96 )
97 97 from mercurial import (
98 98 archival,
99 99 cmdutil,
100 100 encoding,
101 101 error,
102 102 filemerge,
103 103 formatter,
104 104 logcmdutil,
105 105 pycompat,
106 106 registrar,
107 107 scmutil,
108 108 util,
109 109 )
110 110 from mercurial.utils import (
111 111 procutil,
112 112 stringutil,
113 113 )
114 114
115 115 cmdtable = {}
116 116 command = registrar.command(cmdtable)
117 117
118 118 configtable = {}
119 119 configitem = registrar.configitem(configtable)
120 120
121 121 configitem(
122 122 b'extdiff',
123 123 br'opts\..*',
124 124 default=b'',
125 125 generic=True,
126 126 )
127 127
128 128 configitem(
129 129 b'extdiff',
130 130 br'gui\..*',
131 131 generic=True,
132 132 )
133 133
134 134 configitem(
135 135 b'diff-tools',
136 136 br'.*\.diffargs$',
137 137 default=None,
138 138 generic=True,
139 139 )
140 140
141 141 configitem(
142 142 b'diff-tools',
143 143 br'.*\.gui$',
144 144 generic=True,
145 145 )
146 146
147 147 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
148 148 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
149 149 # be specifying the version(s) of Mercurial they are tested with, or
150 150 # leave the attribute unspecified.
151 151 testedwith = b'ships-with-hg-core'
152 152
153 153
154 154 def snapshot(ui, repo, files, node, tmproot, listsubrepos):
155 155 """snapshot files as of some revision
156 156 if not using snapshot, -I/-X does not work and recursive diff
157 157 in tools like kdiff3 and meld displays too many files."""
158 158 dirname = os.path.basename(repo.root)
159 159 if dirname == b"":
160 160 dirname = b"root"
161 161 if node is not None:
162 162 dirname = b'%s.%s' % (dirname, short(node))
163 163 base = os.path.join(tmproot, dirname)
164 164 os.mkdir(base)
165 165 fnsandstat = []
166 166
167 167 if node is not None:
168 168 ui.note(
169 169 _(b'making snapshot of %d files from rev %s\n')
170 170 % (len(files), short(node))
171 171 )
172 172 else:
173 173 ui.note(
174 174 _(b'making snapshot of %d files from working directory\n')
175 175 % (len(files))
176 176 )
177 177
178 178 if files:
179 179 repo.ui.setconfig(b"ui", b"archivemeta", False)
180 180
181 181 archival.archive(
182 182 repo,
183 183 base,
184 184 node,
185 185 b'files',
186 186 match=scmutil.matchfiles(repo, files),
187 187 subrepos=listsubrepos,
188 188 )
189 189
190 190 for fn in sorted(files):
191 191 wfn = util.pconvert(fn)
192 192 ui.note(b' %s\n' % wfn)
193 193
194 194 if node is None:
195 195 dest = os.path.join(base, wfn)
196 196
197 197 fnsandstat.append((dest, repo.wjoin(fn), os.lstat(dest)))
198 198 return dirname, fnsandstat
199 199
200 200
201 201 def formatcmdline(
202 202 cmdline,
203 203 repo_root,
204 204 do3way,
205 205 parent1,
206 206 plabel1,
207 207 parent2,
208 208 plabel2,
209 209 child,
210 210 clabel,
211 211 ):
212 212 # Function to quote file/dir names in the argument string.
213 213 # When not operating in 3-way mode, an empty string is
214 214 # returned for parent2
215 215 replace = {
216 216 b'parent': parent1,
217 217 b'parent1': parent1,
218 218 b'parent2': parent2,
219 219 b'plabel1': plabel1,
220 220 b'plabel2': plabel2,
221 221 b'child': child,
222 222 b'clabel': clabel,
223 223 b'root': repo_root,
224 224 }
225 225
226 226 def quote(match):
227 227 pre = match.group(2)
228 228 key = match.group(3)
229 229 if not do3way and key == b'parent2':
230 230 return pre
231 231 return pre + procutil.shellquote(replace[key])
232 232
233 233 # Match parent2 first, so 'parent1?' will match both parent1 and parent
234 234 regex = (
235 235 br'''(['"]?)([^\s'"$]*)'''
236 236 br'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1'
237 237 )
238 238 if not do3way and not re.search(regex, cmdline):
239 239 cmdline += b' $parent1 $child'
240 240 return re.sub(regex, quote, cmdline)
241 241
242 242
243 243 def _systembackground(cmd, environ=None, cwd=None):
244 244 """like 'procutil.system', but returns the Popen object directly
245 245 so we don't have to wait on it.
246 246 """
247 247 env = procutil.shellenviron(environ)
248 248 proc = subprocess.Popen(
249 249 procutil.tonativestr(cmd),
250 250 shell=True,
251 251 close_fds=procutil.closefds,
252 252 env=procutil.tonativeenv(env),
253 253 cwd=pycompat.rapply(procutil.tonativestr, cwd),
254 254 )
255 255 return proc
256 256
257 257
258 258 def _runperfilediff(
259 259 cmdline,
260 260 repo_root,
261 261 ui,
262 262 guitool,
263 263 do3way,
264 264 confirm,
265 265 commonfiles,
266 266 tmproot,
267 267 dir1a,
268 268 dir1b,
269 269 dir2,
270 270 rev1a,
271 271 rev1b,
272 272 rev2,
273 273 ):
274 274 # Note that we need to sort the list of files because it was
275 275 # built in an "unstable" way and it's annoying to get files in a
276 276 # random order, especially when "confirm" mode is enabled.
277 277 waitprocs = []
278 278 totalfiles = len(commonfiles)
279 279 for idx, commonfile in enumerate(sorted(commonfiles)):
280 280 path1a = os.path.join(dir1a, commonfile)
281 281 label1a = commonfile + rev1a
282 282 if not os.path.isfile(path1a):
283 283 path1a = pycompat.osdevnull
284 284
285 285 path1b = b''
286 286 label1b = b''
287 287 if do3way:
288 288 path1b = os.path.join(dir1b, commonfile)
289 289 label1b = commonfile + rev1b
290 290 if not os.path.isfile(path1b):
291 291 path1b = pycompat.osdevnull
292 292
293 293 path2 = os.path.join(dir2, commonfile)
294 294 label2 = commonfile + rev2
295 295
296 296 if confirm:
297 297 # Prompt before showing this diff
298 298 difffiles = _(b'diff %s (%d of %d)') % (
299 299 commonfile,
300 300 idx + 1,
301 301 totalfiles,
302 302 )
303 303 responses = _(
304 304 b'[Yns?]'
305 305 b'$$ &Yes, show diff'
306 306 b'$$ &No, skip this diff'
307 307 b'$$ &Skip remaining diffs'
308 308 b'$$ &? (display help)'
309 309 )
310 310 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
311 311 if r == 3: # ?
312 312 while r == 3:
313 313 for c, t in ui.extractchoices(responses)[1]:
314 314 ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
315 315 r = ui.promptchoice(b'%s %s' % (difffiles, responses))
316 316 if r == 0: # yes
317 317 pass
318 318 elif r == 1: # no
319 319 continue
320 320 elif r == 2: # skip
321 321 break
322 322
323 323 curcmdline = formatcmdline(
324 324 cmdline,
325 325 repo_root,
326 326 do3way=do3way,
327 327 parent1=path1a,
328 328 plabel1=label1a,
329 329 parent2=path1b,
330 330 plabel2=label1b,
331 331 child=path2,
332 332 clabel=label2,
333 333 )
334 334
335 335 if confirm or not guitool:
336 336 # Run the comparison program and wait for it to exit
337 337 # before we show the next file.
338 338 # This is because either we need to wait for confirmation
339 339 # from the user between each invocation, or because, as far
340 340 # as we know, the tool doesn't have a GUI, in which case
341 341 # we can't run multiple CLI programs at the same time.
342 342 ui.debug(
343 343 b'running %r in %s\n' % (pycompat.bytestr(curcmdline), tmproot)
344 344 )
345 345 ui.system(curcmdline, cwd=tmproot, blockedtag=b'extdiff')
346 346 else:
347 347 # Run the comparison program but don't wait, as we're
348 348 # going to rapid-fire each file diff and then wait on
349 349 # the whole group.
350 350 ui.debug(
351 351 b'running %r in %s (backgrounded)\n'
352 352 % (pycompat.bytestr(curcmdline), tmproot)
353 353 )
354 354 proc = _systembackground(curcmdline, cwd=tmproot)
355 355 waitprocs.append(proc)
356 356
357 357 if waitprocs:
358 358 with ui.timeblockedsection(b'extdiff'):
359 359 for proc in waitprocs:
360 360 proc.wait()
361 361
362 362
363 363 def diffpatch(ui, repo, node1, node2, tmproot, matcher, cmdline):
364 364 template = b'hg-%h.patch'
365 365 # write patches to temporary files
366 366 with formatter.nullformatter(ui, b'extdiff', {}) as fm:
367 367 cmdutil.export(
368 368 repo,
369 369 [repo[node1].rev(), repo[node2].rev()],
370 370 fm,
371 371 fntemplate=repo.vfs.reljoin(tmproot, template),
372 372 match=matcher,
373 373 )
374 374 label1 = cmdutil.makefilename(repo[node1], template)
375 375 label2 = cmdutil.makefilename(repo[node2], template)
376 376 file1 = repo.vfs.reljoin(tmproot, label1)
377 377 file2 = repo.vfs.reljoin(tmproot, label2)
378 378 cmdline = formatcmdline(
379 379 cmdline,
380 380 repo.root,
381 381 # no 3way while comparing patches
382 382 do3way=False,
383 383 parent1=file1,
384 384 plabel1=label1,
385 385 # while comparing patches, there is no second parent
386 386 parent2=None,
387 387 plabel2=None,
388 388 child=file2,
389 389 clabel=label2,
390 390 )
391 391 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
392 392 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
393 393 return 1
394 394
395 395
396 396 def diffrevs(
397 397 ui,
398 398 repo,
399 399 ctx1a,
400 400 ctx1b,
401 401 ctx2,
402 402 matcher,
403 403 tmproot,
404 404 cmdline,
405 405 do3way,
406 406 guitool,
407 407 opts,
408 408 ):
409 409
410 410 subrepos = opts.get(b'subrepos')
411 411
412 412 # calculate list of files changed between both revs
413 413 st = ctx1a.status(ctx2, matcher, listsubrepos=subrepos)
414 414 mod_a, add_a, rem_a = set(st.modified), set(st.added), set(st.removed)
415 415 if do3way:
416 416 stb = ctx1b.status(ctx2, matcher, listsubrepos=subrepos)
417 417 mod_b, add_b, rem_b = (
418 418 set(stb.modified),
419 419 set(stb.added),
420 420 set(stb.removed),
421 421 )
422 422 else:
423 423 mod_b, add_b, rem_b = set(), set(), set()
424 424 modadd = mod_a | add_a | mod_b | add_b
425 425 common = modadd | rem_a | rem_b
426 426 if not common:
427 427 return 0
428 428
429 429 # Always make a copy of ctx1a (and ctx1b, if applicable)
430 430 # dir1a should contain files which are:
431 431 # * modified or removed from ctx1a to ctx2
432 432 # * modified or added from ctx1b to ctx2
433 433 # (except file added from ctx1a to ctx2 as they were not present in
434 434 # ctx1a)
435 435 dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
436 436 dir1a = snapshot(ui, repo, dir1a_files, ctx1a.node(), tmproot, subrepos)[0]
437 437 rev1a = b'' if ctx1a.rev() is None else b'@%d' % ctx1a.rev()
438 438 if do3way:
439 439 # file calculation criteria same as dir1a
440 440 dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
441 441 dir1b = snapshot(
442 442 ui, repo, dir1b_files, ctx1b.node(), tmproot, subrepos
443 443 )[0]
444 444 rev1b = b'@%d' % ctx1b.rev()
445 445 else:
446 446 dir1b = None
447 447 rev1b = b''
448 448
449 449 fnsandstat = []
450 450
451 451 # If ctx2 is not the wc or there is >1 change, copy it
452 452 dir2root = b''
453 453 rev2 = b''
454 454 if ctx2.node() is not None:
455 455 dir2 = snapshot(ui, repo, modadd, ctx2.node(), tmproot, subrepos)[0]
456 456 rev2 = b'@%d' % ctx2.rev()
457 457 elif len(common) > 1:
458 458 # we only actually need to get the files to copy back to
459 459 # the working dir in this case (because the other cases
460 460 # are: diffing 2 revisions or single file -- in which case
461 461 # the file is already directly passed to the diff tool).
462 462 dir2, fnsandstat = snapshot(ui, repo, modadd, None, tmproot, subrepos)
463 463 else:
464 464 # This lets the diff tool open the changed file directly
465 465 dir2 = b''
466 466 dir2root = repo.root
467 467
468 468 label1a = rev1a
469 469 label1b = rev1b
470 470 label2 = rev2
471 471
472 472 if not opts.get(b'per_file'):
473 473 # If only one change, diff the files instead of the directories
474 474 # Handle bogus modifies correctly by checking if the files exist
475 475 if len(common) == 1:
476 476 common_file = util.localpath(common.pop())
477 477 dir1a = os.path.join(tmproot, dir1a, common_file)
478 478 label1a = common_file + rev1a
479 479 if not os.path.isfile(dir1a):
480 480 dir1a = pycompat.osdevnull
481 481 if do3way:
482 482 dir1b = os.path.join(tmproot, dir1b, common_file)
483 483 label1b = common_file + rev1b
484 484 if not os.path.isfile(dir1b):
485 485 dir1b = pycompat.osdevnull
486 486 dir2 = os.path.join(dir2root, dir2, common_file)
487 487 label2 = common_file + rev2
488 488
489 489 # Run the external tool on the 2 temp directories or the patches
490 490 cmdline = formatcmdline(
491 491 cmdline,
492 492 repo.root,
493 493 do3way=do3way,
494 494 parent1=dir1a,
495 495 plabel1=label1a,
496 496 parent2=dir1b,
497 497 plabel2=label1b,
498 498 child=dir2,
499 499 clabel=label2,
500 500 )
501 501 ui.debug(b'running %r in %s\n' % (pycompat.bytestr(cmdline), tmproot))
502 502 ui.system(cmdline, cwd=tmproot, blockedtag=b'extdiff')
503 503 else:
504 504 # Run the external tool once for each pair of files
505 505 _runperfilediff(
506 506 cmdline,
507 507 repo.root,
508 508 ui,
509 509 guitool=guitool,
510 510 do3way=do3way,
511 511 confirm=opts.get(b'confirm'),
512 512 commonfiles=common,
513 513 tmproot=tmproot,
514 514 dir1a=os.path.join(tmproot, dir1a),
515 515 dir1b=os.path.join(tmproot, dir1b) if do3way else None,
516 516 dir2=os.path.join(dir2root, dir2),
517 517 rev1a=rev1a,
518 518 rev1b=rev1b,
519 519 rev2=rev2,
520 520 )
521 521
522 522 for copy_fn, working_fn, st in fnsandstat:
523 523 cpstat = os.lstat(copy_fn)
524 524 # Some tools copy the file and attributes, so mtime may not detect
525 525 # all changes. A size check will detect more cases, but not all.
526 526 # The only certain way to detect every case is to diff all files,
527 527 # which could be expensive.
528 528 # copyfile() carries over the permission, so the mode check could
529 529 # be in an 'elif' branch, but for the case where the file has
530 530 # changed without affecting mtime or size.
531 531 if (
532 532 cpstat[stat.ST_MTIME] != st[stat.ST_MTIME]
533 533 or cpstat.st_size != st.st_size
534 534 or (cpstat.st_mode & 0o100) != (st.st_mode & 0o100)
535 535 ):
536 536 ui.debug(
537 537 b'file changed while diffing. '
538 538 b'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn)
539 539 )
540 540 util.copyfile(copy_fn, working_fn)
541 541
542 542 return 1
543 543
544 544
545 545 def dodiff(ui, repo, cmdline, pats, opts, guitool=False):
546 546 """Do the actual diff:
547 547
548 548 - copy to a temp structure if diffing 2 internal revisions
549 549 - copy to a temp structure if diffing working revision with
550 550 another one and more than 1 file is changed
551 551 - just invoke the diff for a single file in the working dir
552 552 """
553 553
554 554 cmdutil.check_at_most_one_arg(opts, b'rev', b'change')
555 555 revs = opts.get(b'rev')
556 556 from_rev = opts.get(b'from')
557 557 to_rev = opts.get(b'to')
558 558 change = opts.get(b'change')
559 559 do3way = b'$parent2' in cmdline
560 560
561 561 if change:
562 ctx2 = scmutil.revsingle(repo, change, None)
562 ctx2 = logcmdutil.revsingle(repo, change, None)
563 563 ctx1a, ctx1b = ctx2.p1(), ctx2.p2()
564 564 elif from_rev or to_rev:
565 565 repo = scmutil.unhidehashlikerevs(
566 566 repo, [from_rev] + [to_rev], b'nowarn'
567 567 )
568 ctx1a = scmutil.revsingle(repo, from_rev, None)
568 ctx1a = logcmdutil.revsingle(repo, from_rev, None)
569 569 ctx1b = repo[nullrev]
570 ctx2 = scmutil.revsingle(repo, to_rev, None)
570 ctx2 = logcmdutil.revsingle(repo, to_rev, None)
571 571 else:
572 572 ctx1a, ctx2 = logcmdutil.revpair(repo, revs)
573 573 if not revs:
574 574 ctx1b = repo[None].p2()
575 575 else:
576 576 ctx1b = repo[nullrev]
577 577
578 578 # Disable 3-way merge if there is only one parent
579 579 if do3way:
580 580 if ctx1b.rev() == nullrev:
581 581 do3way = False
582 582
583 583 matcher = scmutil.match(ctx2, pats, opts)
584 584
585 585 if opts.get(b'patch'):
586 586 if opts.get(b'subrepos'):
587 587 raise error.Abort(_(b'--patch cannot be used with --subrepos'))
588 588 if opts.get(b'per_file'):
589 589 raise error.Abort(_(b'--patch cannot be used with --per-file'))
590 590 if ctx2.node() is None:
591 591 raise error.Abort(_(b'--patch requires two revisions'))
592 592
593 593 tmproot = pycompat.mkdtemp(prefix=b'extdiff.')
594 594 try:
595 595 if opts.get(b'patch'):
596 596 return diffpatch(
597 597 ui, repo, ctx1a.node(), ctx2.node(), tmproot, matcher, cmdline
598 598 )
599 599
600 600 return diffrevs(
601 601 ui,
602 602 repo,
603 603 ctx1a,
604 604 ctx1b,
605 605 ctx2,
606 606 matcher,
607 607 tmproot,
608 608 cmdline,
609 609 do3way,
610 610 guitool,
611 611 opts,
612 612 )
613 613
614 614 finally:
615 615 ui.note(_(b'cleaning up temp directory\n'))
616 616 shutil.rmtree(tmproot)
617 617
618 618
619 619 extdiffopts = (
620 620 [
621 621 (
622 622 b'o',
623 623 b'option',
624 624 [],
625 625 _(b'pass option to comparison program'),
626 626 _(b'OPT'),
627 627 ),
628 628 (b'r', b'rev', [], _(b'revision (DEPRECATED)'), _(b'REV')),
629 629 (b'', b'from', b'', _(b'revision to diff from'), _(b'REV1')),
630 630 (b'', b'to', b'', _(b'revision to diff to'), _(b'REV2')),
631 631 (b'c', b'change', b'', _(b'change made by revision'), _(b'REV')),
632 632 (
633 633 b'',
634 634 b'per-file',
635 635 False,
636 636 _(b'compare each file instead of revision snapshots'),
637 637 ),
638 638 (
639 639 b'',
640 640 b'confirm',
641 641 False,
642 642 _(b'prompt user before each external program invocation'),
643 643 ),
644 644 (b'', b'patch', None, _(b'compare patches for two revisions')),
645 645 ]
646 646 + cmdutil.walkopts
647 647 + cmdutil.subrepoopts
648 648 )
649 649
650 650
651 651 @command(
652 652 b'extdiff',
653 653 [
654 654 (b'p', b'program', b'', _(b'comparison program to run'), _(b'CMD')),
655 655 ]
656 656 + extdiffopts,
657 657 _(b'hg extdiff [OPT]... [FILE]...'),
658 658 helpcategory=command.CATEGORY_FILE_CONTENTS,
659 659 inferrepo=True,
660 660 )
661 661 def extdiff(ui, repo, *pats, **opts):
662 662 """use external program to diff repository (or selected files)
663 663
664 664 Show differences between revisions for the specified files, using
665 665 an external program. The default program used is diff, with
666 666 default options "-Npru".
667 667
668 668 To select a different program, use the -p/--program option. The
669 669 program will be passed the names of two directories to compare,
670 670 unless the --per-file option is specified (see below). To pass
671 671 additional options to the program, use -o/--option. These will be
672 672 passed before the names of the directories or files to compare.
673 673
674 674 The --from, --to, and --change options work the same way they do for
675 675 :hg:`diff`.
676 676
677 677 The --per-file option runs the external program repeatedly on each
678 678 file to diff, instead of once on two directories. By default,
679 679 this happens one by one, where the next file diff is open in the
680 680 external program only once the previous external program (for the
681 681 previous file diff) has exited. If the external program has a
682 682 graphical interface, it can open all the file diffs at once instead
683 683 of one by one. See :hg:`help -e extdiff` for information about how
684 684 to tell Mercurial that a given program has a graphical interface.
685 685
686 686 The --confirm option will prompt the user before each invocation of
687 687 the external program. It is ignored if --per-file isn't specified.
688 688 """
689 689 opts = pycompat.byteskwargs(opts)
690 690 program = opts.get(b'program')
691 691 option = opts.get(b'option')
692 692 if not program:
693 693 program = b'diff'
694 694 option = option or [b'-Npru']
695 695 cmdline = b' '.join(map(procutil.shellquote, [program] + option))
696 696 return dodiff(ui, repo, cmdline, pats, opts)
697 697
698 698
699 699 class savedcmd(object):
700 700 """use external program to diff repository (or selected files)
701 701
702 702 Show differences between revisions for the specified files, using
703 703 the following program::
704 704
705 705 %(path)s
706 706
707 707 When two revision arguments are given, then changes are shown
708 708 between those revisions. If only one revision is specified then
709 709 that revision is compared to the working directory, and, when no
710 710 revisions are specified, the working directory files are compared
711 711 to its parent.
712 712 """
713 713
714 714 def __init__(self, path, cmdline, isgui):
715 715 # We can't pass non-ASCII through docstrings (and path is
716 716 # in an unknown encoding anyway), but avoid double separators on
717 717 # Windows
718 718 docpath = stringutil.escapestr(path).replace(b'\\\\', b'\\')
719 719 self.__doc__ %= {'path': pycompat.sysstr(stringutil.uirepr(docpath))}
720 720 self._cmdline = cmdline
721 721 self._isgui = isgui
722 722
723 723 def __call__(self, ui, repo, *pats, **opts):
724 724 opts = pycompat.byteskwargs(opts)
725 725 options = b' '.join(map(procutil.shellquote, opts[b'option']))
726 726 if options:
727 727 options = b' ' + options
728 728 return dodiff(
729 729 ui, repo, self._cmdline + options, pats, opts, guitool=self._isgui
730 730 )
731 731
732 732
733 733 def _gettooldetails(ui, cmd, path):
734 734 """
735 735 returns following things for a
736 736 ```
737 737 [extdiff]
738 738 <cmd> = <path>
739 739 ```
740 740 entry:
741 741
742 742 cmd: command/tool name
743 743 path: path to the tool
744 744 cmdline: the command which should be run
745 745 isgui: whether the tool uses GUI or not
746 746
747 747 Reads all external tools related configs, whether it be extdiff section,
748 748 diff-tools or merge-tools section, or its specified in an old format or
749 749 the latest format.
750 750 """
751 751 path = util.expandpath(path)
752 752 if cmd.startswith(b'cmd.'):
753 753 cmd = cmd[4:]
754 754 if not path:
755 755 path = procutil.findexe(cmd)
756 756 if path is None:
757 757 path = filemerge.findexternaltool(ui, cmd) or cmd
758 758 diffopts = ui.config(b'extdiff', b'opts.' + cmd)
759 759 cmdline = procutil.shellquote(path)
760 760 if diffopts:
761 761 cmdline += b' ' + diffopts
762 762 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
763 763 else:
764 764 if path:
765 765 # case "cmd = path opts"
766 766 cmdline = path
767 767 diffopts = len(pycompat.shlexsplit(cmdline)) > 1
768 768 else:
769 769 # case "cmd ="
770 770 path = procutil.findexe(cmd)
771 771 if path is None:
772 772 path = filemerge.findexternaltool(ui, cmd) or cmd
773 773 cmdline = procutil.shellquote(path)
774 774 diffopts = False
775 775 isgui = ui.configbool(b'extdiff', b'gui.' + cmd)
776 776 # look for diff arguments in [diff-tools] then [merge-tools]
777 777 if not diffopts:
778 778 key = cmd + b'.diffargs'
779 779 for section in (b'diff-tools', b'merge-tools'):
780 780 args = ui.config(section, key)
781 781 if args:
782 782 cmdline += b' ' + args
783 783 if isgui is None:
784 784 isgui = ui.configbool(section, cmd + b'.gui') or False
785 785 break
786 786 return cmd, path, cmdline, isgui
787 787
788 788
789 789 def uisetup(ui):
790 790 for cmd, path in ui.configitems(b'extdiff'):
791 791 if cmd.startswith(b'opts.') or cmd.startswith(b'gui.'):
792 792 continue
793 793 cmd, path, cmdline, isgui = _gettooldetails(ui, cmd, path)
794 794 command(
795 795 cmd,
796 796 extdiffopts[:],
797 797 _(b'hg %s [OPTION]... [FILE]...') % cmd,
798 798 helpcategory=command.CATEGORY_FILE_CONTENTS,
799 799 inferrepo=True,
800 800 )(savedcmd(path, cmdline, isgui))
801 801
802 802
803 803 # tell hggettext to extract docstrings from these functions:
804 804 i18nfunctions = [savedcmd]
@@ -1,357 +1,358 b''
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # commands: fastannotate commands
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial import (
14 14 commands,
15 15 encoding,
16 16 error,
17 17 extensions,
18 logcmdutil,
18 19 patch,
19 20 pycompat,
20 21 registrar,
21 22 scmutil,
22 23 util,
23 24 )
24 25
25 26 from . import (
26 27 context as facontext,
27 28 error as faerror,
28 29 formatter as faformatter,
29 30 )
30 31
31 32 cmdtable = {}
32 33 command = registrar.command(cmdtable)
33 34
34 35
35 36 def _matchpaths(repo, rev, pats, opts, aopts=facontext.defaultopts):
36 37 """generate paths matching given patterns"""
37 38 perfhack = repo.ui.configbool(b'fastannotate', b'perfhack')
38 39
39 40 # disable perfhack if:
40 41 # a) any walkopt is used
41 42 # b) if we treat pats as plain file names, some of them do not have
42 43 # corresponding linelog files
43 44 if perfhack:
44 45 # cwd related to reporoot
45 46 reporoot = os.path.dirname(repo.path)
46 47 reldir = os.path.relpath(encoding.getcwd(), reporoot)
47 48 if reldir == b'.':
48 49 reldir = b''
49 50 if any(opts.get(o[1]) for o in commands.walkopts): # a)
50 51 perfhack = False
51 52 else: # b)
52 53 relpats = [
53 54 os.path.relpath(p, reporoot) if os.path.isabs(p) else p
54 55 for p in pats
55 56 ]
56 57 # disable perfhack on '..' since it allows escaping from the repo
57 58 if any(
58 59 (
59 60 b'..' in f
60 61 or not os.path.isfile(
61 62 facontext.pathhelper(repo, f, aopts).linelogpath
62 63 )
63 64 )
64 65 for f in relpats
65 66 ):
66 67 perfhack = False
67 68
68 69 # perfhack: emit paths directory without checking with manifest
69 70 # this can be incorrect if the rev dos not have file.
70 71 if perfhack:
71 72 for p in relpats:
72 73 yield os.path.join(reldir, p)
73 74 else:
74 75
75 76 def bad(x, y):
76 77 raise error.Abort(b"%s: %s" % (x, y))
77 78
78 ctx = scmutil.revsingle(repo, rev)
79 ctx = logcmdutil.revsingle(repo, rev)
79 80 m = scmutil.match(ctx, pats, opts, badfn=bad)
80 81 for p in ctx.walk(m):
81 82 yield p
82 83
83 84
84 85 fastannotatecommandargs = {
85 86 'options': [
86 87 (b'r', b'rev', b'.', _(b'annotate the specified revision'), _(b'REV')),
87 88 (b'u', b'user', None, _(b'list the author (long with -v)')),
88 89 (b'f', b'file', None, _(b'list the filename')),
89 90 (b'd', b'date', None, _(b'list the date (short with -q)')),
90 91 (b'n', b'number', None, _(b'list the revision number (default)')),
91 92 (b'c', b'changeset', None, _(b'list the changeset')),
92 93 (
93 94 b'l',
94 95 b'line-number',
95 96 None,
96 97 _(b'show line number at the first appearance'),
97 98 ),
98 99 (
99 100 b'e',
100 101 b'deleted',
101 102 None,
102 103 _(b'show deleted lines (slow) (EXPERIMENTAL)'),
103 104 ),
104 105 (
105 106 b'',
106 107 b'no-content',
107 108 None,
108 109 _(b'do not show file content (EXPERIMENTAL)'),
109 110 ),
110 111 (b'', b'no-follow', None, _(b"don't follow copies and renames")),
111 112 (
112 113 b'',
113 114 b'linear',
114 115 None,
115 116 _(
116 117 b'enforce linear history, ignore second parent '
117 118 b'of merges (EXPERIMENTAL)'
118 119 ),
119 120 ),
120 121 (
121 122 b'',
122 123 b'long-hash',
123 124 None,
124 125 _(b'show long changeset hash (EXPERIMENTAL)'),
125 126 ),
126 127 (
127 128 b'',
128 129 b'rebuild',
129 130 None,
130 131 _(b'rebuild cache even if it exists (EXPERIMENTAL)'),
131 132 ),
132 133 ]
133 134 + commands.diffwsopts
134 135 + commands.walkopts
135 136 + commands.formatteropts,
136 137 'synopsis': _(b'[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...'),
137 138 'inferrepo': True,
138 139 }
139 140
140 141
141 142 def fastannotate(ui, repo, *pats, **opts):
142 143 """show changeset information by line for each file
143 144
144 145 List changes in files, showing the revision id responsible for each line.
145 146
146 147 This command is useful for discovering when a change was made and by whom.
147 148
148 149 By default this command prints revision numbers. If you include --file,
149 150 --user, or --date, the revision number is suppressed unless you also
150 151 include --number. The default format can also be customized by setting
151 152 fastannotate.defaultformat.
152 153
153 154 Returns 0 on success.
154 155
155 156 .. container:: verbose
156 157
157 158 This command uses an implementation different from the vanilla annotate
158 159 command, which may produce slightly different (while still reasonable)
159 160 outputs for some cases.
160 161
161 162 Unlike the vanilla anootate, fastannotate follows rename regardless of
162 163 the existence of --file.
163 164
164 165 For the best performance when running on a full repo, use -c, -l,
165 166 avoid -u, -d, -n. Use --linear and --no-content to make it even faster.
166 167
167 168 For the best performance when running on a shallow (remotefilelog)
168 169 repo, avoid --linear, --no-follow, or any diff options. As the server
169 170 won't be able to populate annotate cache when non-default options
170 171 affecting results are used.
171 172 """
172 173 if not pats:
173 174 raise error.Abort(_(b'at least one filename or pattern is required'))
174 175
175 176 # performance hack: filtered repo can be slow. unfilter by default.
176 177 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
177 178 repo = repo.unfiltered()
178 179
179 180 opts = pycompat.byteskwargs(opts)
180 181
181 182 rev = opts.get(b'rev', b'.')
182 183 rebuild = opts.get(b'rebuild', False)
183 184
184 185 diffopts = patch.difffeatureopts(
185 186 ui, opts, section=b'annotate', whitespace=True
186 187 )
187 188 aopts = facontext.annotateopts(
188 189 diffopts=diffopts,
189 190 followmerge=not opts.get(b'linear', False),
190 191 followrename=not opts.get(b'no_follow', False),
191 192 )
192 193
193 194 if not any(
194 195 opts.get(s)
195 196 for s in [b'user', b'date', b'file', b'number', b'changeset']
196 197 ):
197 198 # default 'number' for compatibility. but fastannotate is more
198 199 # efficient with "changeset", "line-number" and "no-content".
199 200 for name in ui.configlist(
200 201 b'fastannotate', b'defaultformat', [b'number']
201 202 ):
202 203 opts[name] = True
203 204
204 205 ui.pager(b'fastannotate')
205 206 template = opts.get(b'template')
206 207 if template == b'json':
207 208 formatter = faformatter.jsonformatter(ui, repo, opts)
208 209 else:
209 210 formatter = faformatter.defaultformatter(ui, repo, opts)
210 211 showdeleted = opts.get(b'deleted', False)
211 212 showlines = not bool(opts.get(b'no_content'))
212 213 showpath = opts.get(b'file', False)
213 214
214 215 # find the head of the main (master) branch
215 216 master = ui.config(b'fastannotate', b'mainbranch') or rev
216 217
217 218 # paths will be used for prefetching and the real annotating
218 219 paths = list(_matchpaths(repo, rev, pats, opts, aopts))
219 220
220 221 # for client, prefetch from the server
221 222 if util.safehasattr(repo, 'prefetchfastannotate'):
222 223 repo.prefetchfastannotate(paths)
223 224
224 225 for path in paths:
225 226 result = lines = existinglines = None
226 227 while True:
227 228 try:
228 229 with facontext.annotatecontext(repo, path, aopts, rebuild) as a:
229 230 result = a.annotate(
230 231 rev,
231 232 master=master,
232 233 showpath=showpath,
233 234 showlines=(showlines and not showdeleted),
234 235 )
235 236 if showdeleted:
236 237 existinglines = {(l[0], l[1]) for l in result}
237 238 result = a.annotatealllines(
238 239 rev, showpath=showpath, showlines=showlines
239 240 )
240 241 break
241 242 except (faerror.CannotReuseError, faerror.CorruptedFileError):
242 243 # happens if master moves backwards, or the file was deleted
243 244 # and readded, or renamed to an existing name, or corrupted.
244 245 if rebuild: # give up since we have tried rebuild already
245 246 raise
246 247 else: # try a second time rebuilding the cache (slow)
247 248 rebuild = True
248 249 continue
249 250
250 251 if showlines:
251 252 result, lines = result
252 253
253 254 formatter.write(result, lines, existinglines=existinglines)
254 255 formatter.end()
255 256
256 257
257 258 _newopts = set()
258 259 _knownopts = {
259 260 opt[1].replace(b'-', b'_')
260 261 for opt in (fastannotatecommandargs['options'] + commands.globalopts)
261 262 }
262 263
263 264
264 265 def _annotatewrapper(orig, ui, repo, *pats, **opts):
265 266 """used by wrapdefault"""
266 267 # we need this hack until the obsstore has 0.0 seconds perf impact
267 268 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
268 269 repo = repo.unfiltered()
269 270
270 271 # treat the file as text (skip the isbinary check)
271 272 if ui.configbool(b'fastannotate', b'forcetext'):
272 273 opts['text'] = True
273 274
274 275 # check if we need to do prefetch (client-side)
275 276 rev = opts.get('rev')
276 277 if util.safehasattr(repo, 'prefetchfastannotate') and rev is not None:
277 278 paths = list(_matchpaths(repo, rev, pats, pycompat.byteskwargs(opts)))
278 279 repo.prefetchfastannotate(paths)
279 280
280 281 return orig(ui, repo, *pats, **opts)
281 282
282 283
283 284 def registercommand():
284 285 """register the fastannotate command"""
285 286 name = b'fastannotate|fastblame|fa'
286 287 command(name, helpbasic=True, **fastannotatecommandargs)(fastannotate)
287 288
288 289
289 290 def wrapdefault():
290 291 """wrap the default annotate command, to be aware of the protocol"""
291 292 extensions.wrapcommand(commands.table, b'annotate', _annotatewrapper)
292 293
293 294
294 295 @command(
295 296 b'debugbuildannotatecache',
296 297 [(b'r', b'rev', b'', _(b'build up to the specific revision'), _(b'REV'))]
297 298 + commands.walkopts,
298 299 _(b'[-r REV] FILE...'),
299 300 )
300 301 def debugbuildannotatecache(ui, repo, *pats, **opts):
301 302 """incrementally build fastannotate cache up to REV for specified files
302 303
303 304 If REV is not specified, use the config 'fastannotate.mainbranch'.
304 305
305 306 If fastannotate.client is True, download the annotate cache from the
306 307 server. Otherwise, build the annotate cache locally.
307 308
308 309 The annotate cache will be built using the default diff and follow
309 310 options and lives in '.hg/fastannotate/default'.
310 311 """
311 312 opts = pycompat.byteskwargs(opts)
312 313 rev = opts.get(b'REV') or ui.config(b'fastannotate', b'mainbranch')
313 314 if not rev:
314 315 raise error.Abort(
315 316 _(b'you need to provide a revision'),
316 317 hint=_(b'set fastannotate.mainbranch or use --rev'),
317 318 )
318 319 if ui.configbool(b'fastannotate', b'unfilteredrepo'):
319 320 repo = repo.unfiltered()
320 ctx = scmutil.revsingle(repo, rev)
321 ctx = logcmdutil.revsingle(repo, rev)
321 322 m = scmutil.match(ctx, pats, opts)
322 323 paths = list(ctx.walk(m))
323 324 if util.safehasattr(repo, 'prefetchfastannotate'):
324 325 # client
325 326 if opts.get(b'REV'):
326 327 raise error.Abort(_(b'--rev cannot be used for client'))
327 328 repo.prefetchfastannotate(paths)
328 329 else:
329 330 # server, or full repo
330 331 progress = ui.makeprogress(_(b'building'), total=len(paths))
331 332 for i, path in enumerate(paths):
332 333 progress.update(i)
333 334 with facontext.annotatecontext(repo, path) as actx:
334 335 try:
335 336 if actx.isuptodate(rev):
336 337 continue
337 338 actx.annotate(rev, rev)
338 339 except (faerror.CannotReuseError, faerror.CorruptedFileError):
339 340 # the cache is broken (could happen with renaming so the
340 341 # file history gets invalidated). rebuild and try again.
341 342 ui.debug(
342 343 b'fastannotate: %s: rebuilding broken cache\n' % path
343 344 )
344 345 actx.rebuild()
345 346 try:
346 347 actx.annotate(rev, rev)
347 348 except Exception as ex:
348 349 # possibly a bug, but should not stop us from building
349 350 # cache for other files.
350 351 ui.warn(
351 352 _(
352 353 b'fastannotate: %s: failed to '
353 354 b'build cache: %r\n'
354 355 )
355 356 % (path, ex)
356 357 )
357 358 progress.complete()
@@ -1,1869 +1,1869 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''Overridden Mercurial commands and functions for the largefiles extension'''
10 10 from __future__ import absolute_import
11 11
12 12 import copy
13 13 import os
14 14
15 15 from mercurial.i18n import _
16 16
17 17 from mercurial.pycompat import open
18 18
19 19 from mercurial.hgweb import webcommands
20 20
21 21 from mercurial import (
22 22 archival,
23 23 cmdutil,
24 24 copies as copiesmod,
25 25 error,
26 26 exchange,
27 27 extensions,
28 28 exthelper,
29 29 filemerge,
30 30 hg,
31 31 logcmdutil,
32 32 match as matchmod,
33 33 merge,
34 34 mergestate as mergestatemod,
35 35 pathutil,
36 36 pycompat,
37 37 scmutil,
38 38 smartset,
39 39 subrepo,
40 40 url as urlmod,
41 41 util,
42 42 )
43 43
44 44 from mercurial.upgrade_utils import (
45 45 actions as upgrade_actions,
46 46 )
47 47
48 48 from . import (
49 49 lfcommands,
50 50 lfutil,
51 51 storefactory,
52 52 )
53 53
54 54 eh = exthelper.exthelper()
55 55
56 56 lfstatus = lfutil.lfstatus
57 57
58 58 MERGE_ACTION_LARGEFILE_MARK_REMOVED = b'lfmr'
59 59
60 60 # -- Utility functions: commonly/repeatedly needed functionality ---------------
61 61
62 62
63 63 def composelargefilematcher(match, manifest):
64 64 """create a matcher that matches only the largefiles in the original
65 65 matcher"""
66 66 m = copy.copy(match)
67 67 lfile = lambda f: lfutil.standin(f) in manifest
68 68 m._files = [lf for lf in m._files if lfile(lf)]
69 69 m._fileset = set(m._files)
70 70 m.always = lambda: False
71 71 origmatchfn = m.matchfn
72 72 m.matchfn = lambda f: lfile(f) and origmatchfn(f)
73 73 return m
74 74
75 75
76 76 def composenormalfilematcher(match, manifest, exclude=None):
77 77 excluded = set()
78 78 if exclude is not None:
79 79 excluded.update(exclude)
80 80
81 81 m = copy.copy(match)
82 82 notlfile = lambda f: not (
83 83 lfutil.isstandin(f) or lfutil.standin(f) in manifest or f in excluded
84 84 )
85 85 m._files = [lf for lf in m._files if notlfile(lf)]
86 86 m._fileset = set(m._files)
87 87 m.always = lambda: False
88 88 origmatchfn = m.matchfn
89 89 m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
90 90 return m
91 91
92 92
93 93 def addlargefiles(ui, repo, isaddremove, matcher, uipathfn, **opts):
94 94 large = opts.get('large')
95 95 lfsize = lfutil.getminsize(
96 96 ui, lfutil.islfilesrepo(repo), opts.get('lfsize')
97 97 )
98 98
99 99 lfmatcher = None
100 100 if lfutil.islfilesrepo(repo):
101 101 lfpats = ui.configlist(lfutil.longname, b'patterns')
102 102 if lfpats:
103 103 lfmatcher = matchmod.match(repo.root, b'', list(lfpats))
104 104
105 105 lfnames = []
106 106 m = matcher
107 107
108 108 wctx = repo[None]
109 109 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
110 110 exact = m.exact(f)
111 111 lfile = lfutil.standin(f) in wctx
112 112 nfile = f in wctx
113 113 exists = lfile or nfile
114 114
115 115 # Don't warn the user when they attempt to add a normal tracked file.
116 116 # The normal add code will do that for us.
117 117 if exact and exists:
118 118 if lfile:
119 119 ui.warn(_(b'%s already a largefile\n') % uipathfn(f))
120 120 continue
121 121
122 122 if (exact or not exists) and not lfutil.isstandin(f):
123 123 # In case the file was removed previously, but not committed
124 124 # (issue3507)
125 125 if not repo.wvfs.exists(f):
126 126 continue
127 127
128 128 abovemin = (
129 129 lfsize and repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024
130 130 )
131 131 if large or abovemin or (lfmatcher and lfmatcher(f)):
132 132 lfnames.append(f)
133 133 if ui.verbose or not exact:
134 134 ui.status(_(b'adding %s as a largefile\n') % uipathfn(f))
135 135
136 136 bad = []
137 137
138 138 # Need to lock, otherwise there could be a race condition between
139 139 # when standins are created and added to the repo.
140 140 with repo.wlock():
141 141 if not opts.get('dry_run'):
142 142 standins = []
143 143 lfdirstate = lfutil.openlfdirstate(ui, repo)
144 144 for f in lfnames:
145 145 standinname = lfutil.standin(f)
146 146 lfutil.writestandin(
147 147 repo,
148 148 standinname,
149 149 hash=b'',
150 150 executable=lfutil.getexecutable(repo.wjoin(f)),
151 151 )
152 152 standins.append(standinname)
153 153 lfdirstate.set_tracked(f)
154 154 lfdirstate.write()
155 155 bad += [
156 156 lfutil.splitstandin(f)
157 157 for f in repo[None].add(standins)
158 158 if f in m.files()
159 159 ]
160 160
161 161 added = [f for f in lfnames if f not in bad]
162 162 return added, bad
163 163
164 164
165 165 def removelargefiles(ui, repo, isaddremove, matcher, uipathfn, dryrun, **opts):
166 166 after = opts.get('after')
167 167 m = composelargefilematcher(matcher, repo[None].manifest())
168 168 with lfstatus(repo):
169 169 s = repo.status(match=m, clean=not isaddremove)
170 170 manifest = repo[None].manifest()
171 171 modified, added, deleted, clean = [
172 172 [f for f in list if lfutil.standin(f) in manifest]
173 173 for list in (s.modified, s.added, s.deleted, s.clean)
174 174 ]
175 175
176 176 def warn(files, msg):
177 177 for f in files:
178 178 ui.warn(msg % uipathfn(f))
179 179 return int(len(files) > 0)
180 180
181 181 if after:
182 182 remove = deleted
183 183 result = warn(
184 184 modified + added + clean, _(b'not removing %s: file still exists\n')
185 185 )
186 186 else:
187 187 remove = deleted + clean
188 188 result = warn(
189 189 modified,
190 190 _(
191 191 b'not removing %s: file is modified (use -f'
192 192 b' to force removal)\n'
193 193 ),
194 194 )
195 195 result = (
196 196 warn(
197 197 added,
198 198 _(
199 199 b'not removing %s: file has been marked for add'
200 200 b' (use forget to undo)\n'
201 201 ),
202 202 )
203 203 or result
204 204 )
205 205
206 206 # Need to lock because standin files are deleted then removed from the
207 207 # repository and we could race in-between.
208 208 with repo.wlock():
209 209 lfdirstate = lfutil.openlfdirstate(ui, repo)
210 210 for f in sorted(remove):
211 211 if ui.verbose or not m.exact(f):
212 212 ui.status(_(b'removing %s\n') % uipathfn(f))
213 213
214 214 if not dryrun:
215 215 if not after:
216 216 repo.wvfs.unlinkpath(f, ignoremissing=True)
217 217
218 218 if dryrun:
219 219 return result
220 220
221 221 remove = [lfutil.standin(f) for f in remove]
222 222 # If this is being called by addremove, let the original addremove
223 223 # function handle this.
224 224 if not isaddremove:
225 225 for f in remove:
226 226 repo.wvfs.unlinkpath(f, ignoremissing=True)
227 227 repo[None].forget(remove)
228 228
229 229 for f in remove:
230 230 lfdirstate.set_untracked(lfutil.splitstandin(f))
231 231
232 232 lfdirstate.write()
233 233
234 234 return result
235 235
236 236
237 237 # For overriding mercurial.hgweb.webcommands so that largefiles will
238 238 # appear at their right place in the manifests.
239 239 @eh.wrapfunction(webcommands, b'decodepath')
240 240 def decodepath(orig, path):
241 241 return lfutil.splitstandin(path) or path
242 242
243 243
244 244 # -- Wrappers: modify existing commands --------------------------------
245 245
246 246
247 247 @eh.wrapcommand(
248 248 b'add',
249 249 opts=[
250 250 (b'', b'large', None, _(b'add as largefile')),
251 251 (b'', b'normal', None, _(b'add as normal file')),
252 252 (
253 253 b'',
254 254 b'lfsize',
255 255 b'',
256 256 _(
257 257 b'add all files above this size (in megabytes) '
258 258 b'as largefiles (default: 10)'
259 259 ),
260 260 ),
261 261 ],
262 262 )
263 263 def overrideadd(orig, ui, repo, *pats, **opts):
264 264 if opts.get('normal') and opts.get('large'):
265 265 raise error.Abort(_(b'--normal cannot be used with --large'))
266 266 return orig(ui, repo, *pats, **opts)
267 267
268 268
269 269 @eh.wrapfunction(cmdutil, b'add')
270 270 def cmdutiladd(orig, ui, repo, matcher, prefix, uipathfn, explicitonly, **opts):
271 271 # The --normal flag short circuits this override
272 272 if opts.get('normal'):
273 273 return orig(ui, repo, matcher, prefix, uipathfn, explicitonly, **opts)
274 274
275 275 ladded, lbad = addlargefiles(ui, repo, False, matcher, uipathfn, **opts)
276 276 normalmatcher = composenormalfilematcher(
277 277 matcher, repo[None].manifest(), ladded
278 278 )
279 279 bad = orig(ui, repo, normalmatcher, prefix, uipathfn, explicitonly, **opts)
280 280
281 281 bad.extend(f for f in lbad)
282 282 return bad
283 283
284 284
285 285 @eh.wrapfunction(cmdutil, b'remove')
286 286 def cmdutilremove(
287 287 orig, ui, repo, matcher, prefix, uipathfn, after, force, subrepos, dryrun
288 288 ):
289 289 normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
290 290 result = orig(
291 291 ui,
292 292 repo,
293 293 normalmatcher,
294 294 prefix,
295 295 uipathfn,
296 296 after,
297 297 force,
298 298 subrepos,
299 299 dryrun,
300 300 )
301 301 return (
302 302 removelargefiles(
303 303 ui, repo, False, matcher, uipathfn, dryrun, after=after, force=force
304 304 )
305 305 or result
306 306 )
307 307
308 308
309 309 @eh.wrapfunction(subrepo.hgsubrepo, b'status')
310 310 def overridestatusfn(orig, repo, rev2, **opts):
311 311 with lfstatus(repo._repo):
312 312 return orig(repo, rev2, **opts)
313 313
314 314
315 315 @eh.wrapcommand(b'status')
316 316 def overridestatus(orig, ui, repo, *pats, **opts):
317 317 with lfstatus(repo):
318 318 return orig(ui, repo, *pats, **opts)
319 319
320 320
321 321 @eh.wrapfunction(subrepo.hgsubrepo, b'dirty')
322 322 def overridedirty(orig, repo, ignoreupdate=False, missing=False):
323 323 with lfstatus(repo._repo):
324 324 return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
325 325
326 326
327 327 @eh.wrapcommand(b'log')
328 328 def overridelog(orig, ui, repo, *pats, **opts):
329 329 def overridematchandpats(
330 330 orig,
331 331 ctx,
332 332 pats=(),
333 333 opts=None,
334 334 globbed=False,
335 335 default=b'relpath',
336 336 badfn=None,
337 337 ):
338 338 """Matcher that merges root directory with .hglf, suitable for log.
339 339 It is still possible to match .hglf directly.
340 340 For any listed files run log on the standin too.
341 341 matchfn tries both the given filename and with .hglf stripped.
342 342 """
343 343 if opts is None:
344 344 opts = {}
345 345 matchandpats = orig(ctx, pats, opts, globbed, default, badfn=badfn)
346 346 m, p = copy.copy(matchandpats)
347 347
348 348 if m.always():
349 349 # We want to match everything anyway, so there's no benefit trying
350 350 # to add standins.
351 351 return matchandpats
352 352
353 353 pats = set(p)
354 354
355 355 def fixpats(pat, tostandin=lfutil.standin):
356 356 if pat.startswith(b'set:'):
357 357 return pat
358 358
359 359 kindpat = matchmod._patsplit(pat, None)
360 360
361 361 if kindpat[0] is not None:
362 362 return kindpat[0] + b':' + tostandin(kindpat[1])
363 363 return tostandin(kindpat[1])
364 364
365 365 cwd = repo.getcwd()
366 366 if cwd:
367 367 hglf = lfutil.shortname
368 368 back = util.pconvert(repo.pathto(hglf)[: -len(hglf)])
369 369
370 370 def tostandin(f):
371 371 # The file may already be a standin, so truncate the back
372 372 # prefix and test before mangling it. This avoids turning
373 373 # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
374 374 if f.startswith(back) and lfutil.splitstandin(f[len(back) :]):
375 375 return f
376 376
377 377 # An absolute path is from outside the repo, so truncate the
378 378 # path to the root before building the standin. Otherwise cwd
379 379 # is somewhere in the repo, relative to root, and needs to be
380 380 # prepended before building the standin.
381 381 if os.path.isabs(cwd):
382 382 f = f[len(back) :]
383 383 else:
384 384 f = cwd + b'/' + f
385 385 return back + lfutil.standin(f)
386 386
387 387 else:
388 388
389 389 def tostandin(f):
390 390 if lfutil.isstandin(f):
391 391 return f
392 392 return lfutil.standin(f)
393 393
394 394 pats.update(fixpats(f, tostandin) for f in p)
395 395
396 396 for i in range(0, len(m._files)):
397 397 # Don't add '.hglf' to m.files, since that is already covered by '.'
398 398 if m._files[i] == b'.':
399 399 continue
400 400 standin = lfutil.standin(m._files[i])
401 401 # If the "standin" is a directory, append instead of replace to
402 402 # support naming a directory on the command line with only
403 403 # largefiles. The original directory is kept to support normal
404 404 # files.
405 405 if standin in ctx:
406 406 m._files[i] = standin
407 407 elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
408 408 m._files.append(standin)
409 409
410 410 m._fileset = set(m._files)
411 411 m.always = lambda: False
412 412 origmatchfn = m.matchfn
413 413
414 414 def lfmatchfn(f):
415 415 lf = lfutil.splitstandin(f)
416 416 if lf is not None and origmatchfn(lf):
417 417 return True
418 418 r = origmatchfn(f)
419 419 return r
420 420
421 421 m.matchfn = lfmatchfn
422 422
423 423 ui.debug(b'updated patterns: %s\n' % b', '.join(sorted(pats)))
424 424 return m, pats
425 425
426 426 # For hg log --patch, the match object is used in two different senses:
427 427 # (1) to determine what revisions should be printed out, and
428 428 # (2) to determine what files to print out diffs for.
429 429 # The magic matchandpats override should be used for case (1) but not for
430 430 # case (2).
431 431 oldmatchandpats = scmutil.matchandpats
432 432
433 433 def overridemakefilematcher(orig, repo, pats, opts, badfn=None):
434 434 wctx = repo[None]
435 435 match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
436 436 return lambda ctx: match
437 437
438 438 wrappedmatchandpats = extensions.wrappedfunction(
439 439 scmutil, b'matchandpats', overridematchandpats
440 440 )
441 441 wrappedmakefilematcher = extensions.wrappedfunction(
442 442 logcmdutil, b'_makenofollowfilematcher', overridemakefilematcher
443 443 )
444 444 with wrappedmatchandpats, wrappedmakefilematcher:
445 445 return orig(ui, repo, *pats, **opts)
446 446
447 447
@eh.wrapcommand(
    b'verify',
    opts=[
        (
            b'',
            b'large',
            None,
            _(b'verify that all largefiles in current revision exists'),
        ),
        (
            b'',
            b'lfa',
            None,
            _(b'verify largefiles in all revisions, not just current'),
        ),
        (
            b'',
            b'lfc',
            None,
            _(b'verify local largefile contents, not just existence'),
        ),
    ],
)
def overrideverify(orig, ui, repo, *pats, **opts):
    """Run the wrapped verify, then additionally verify largefiles when
    any of --large/--lfa/--lfc was requested."""
    want_current = opts.pop('large', False)
    want_all = opts.pop('lfa', False)
    want_contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if not (want_current or want_all or want_contents):
        return result
    return result or lfcommands.verifylfiles(ui, repo, want_all, want_contents)
480 480
481 481
@eh.wrapcommand(
    b'debugstate',
    opts=[(b'', b'large', None, _(b'display largefiles dirstate'))],
)
def overridedebugstate(orig, ui, repo, *pats, **opts):
    """With --large, run debugstate against the largefiles dirstate
    instead of the repository's normal dirstate."""
    if not opts.pop('large', False):
        orig(ui, repo, *pats, **opts)
        return

    class fakerepo(object):
        # minimal stand-in exposing only the attribute debugstate reads
        dirstate = lfutil.openlfdirstate(ui, repo)

    orig(ui, fakerepo, *pats, **opts)
496 496
497 497
498 498 # Before starting the manifest merge, merge.updates will call
499 499 # _checkunknownfile to check if there are any files in the merged-in
500 500 # changeset that collide with unknown files in the working copy.
501 501 #
502 502 # The largefiles are seen as unknown, so this prevents us from merging
503 503 # in a file 'foo' if we already have a largefile with the same name.
504 504 #
505 505 # The overridden function filters the unknown files by removing any
506 506 # largefiles. This makes the merge proceed and we can then handle this
507 507 # case further in the overridden calculateupdates function below.
@eh.wrapfunction(merge, b'_checkunknownfile')
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    """Skip the unknown-file collision check for files whose standin is
    tracked in the working context (i.e. known largefiles)."""
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)
513 513
514 514
515 515 # The manifest merge handles conflicts on the manifest level. We want
516 516 # to handle changes in largefile-ness of files at this level too.
517 517 #
518 518 # The strategy is to run the original calculateupdates and then process
519 519 # the action list it outputs. There are two cases we need to deal with:
520 520 #
521 521 # 1. Normal file in p1, largefile in p2. Here the largefile is
522 522 # detected via its standin file, which will enter the working copy
523 523 # with a "get" action. It is not "merge" since the standin is all
524 524 # Mercurial is concerned with at this level -- the link to the
525 525 # existing normal file is not relevant here.
526 526 #
527 527 # 2. Largefile in p1, normal file in p2. Here we get a "merge" action
528 528 # since the largefile will be present in the working copy and
529 529 # different from the normal file in p2. Mercurial therefore
530 530 # triggers a merge action.
531 531 #
532 532 # In both cases, we prompt the user and emit new actions to either
533 533 # remove the standin (if the normal file was kept) or to remove the
534 534 # normal file and get the standin (if the largefile was kept). The
535 535 # default prompt answer is to use the largefile version since it was
536 536 # presumably changed on purpose.
537 537 #
538 538 # Finally, the merge.applyupdates function will then take care of
539 539 # writing the files into the working copy and lfcommands.updatelfiles
540 540 # will update the largefiles.
@eh.wrapfunction(merge, b'calculateupdates')
def overridecalculateupdates(
    origfn, repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
):
    """Post-process merge actions so largefile <-> normal-file transitions
    are resolved by prompting the user (see the long comment above).

    Returns the (possibly modified) merge result from the original
    calculateupdates.
    """
    overwrite = force and not branchmerge
    mresult = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs
    )

    if overwrite:
        # plain overwrite: no largefile/normal conflict to resolve
        return mresult

    # Convert to dictionary with filename as key and action as value.
    lfiles = set()
    for f in mresult.files():
        splitstandin = lfutil.splitstandin(f)
        if splitstandin is not None and splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        # actions recorded for the largefile and for its standin, if any
        (lm, largs, lmsg) = mresult.getfile(lfile, (None, None, None))
        (sm, sargs, smsg) = mresult.getfile(standin, (None, None, None))
        if sm in (b'g', b'dc') and lm != b'r':
            if sm == b'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local normal file %s into a largefile\n'
                    b'use (l)argefile or keep (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # pick remote largefile
                mresult.addfile(lfile, b'r', None, b'replaced by standin')
                mresult.addfile(standin, b'g', sargs, b'replaces standin')
            else:  # keep local normal file
                mresult.addfile(lfile, b'k', None, b'replaces standin')
                if branchmerge:
                    mresult.addfile(
                        standin,
                        b'k',
                        None,
                        b'replaced by non-standin',
                    )
                else:
                    mresult.addfile(
                        standin,
                        b'r',
                        None,
                        b'replaced by non-standin',
                    )
        elif lm in (b'g', b'dc') and sm != b'r':
            if lm == b'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = (
                _(
                    b'remote turned local largefile %s into a normal file\n'
                    b'keep (l)argefile or use (n)ormal file?'
                    b'$$ &Largefile $$ &Normal file'
                )
                % lfile
            )
            if repo.ui.promptchoice(usermsg, 0) == 0:  # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    mresult.addfile(
                        lfile,
                        b'k',
                        None,
                        b'replaced by standin',
                    )
                    mresult.addfile(standin, b'k', None, b'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    mresult.addfile(
                        lfile,
                        MERGE_ACTION_LARGEFILE_MARK_REMOVED,
                        None,
                        b'forget non-standin largefile',
                    )

                    # linear-merge should treat this largefile as 're-added'
                    mresult.addfile(standin, b'a', None, b'keep standin')
            else:  # pick remote normal file
                mresult.addfile(lfile, b'g', largs, b'replaces standin')
                mresult.addfile(
                    standin,
                    b'r',
                    None,
                    b'replaced by non-standin',
                )

    return mresult
645 645
646 646
@eh.wrapfunction(mergestatemod, b'recordupdates')
def mergerecordupdates(orig, repo, actions, branchmerge, getfiledata):
    """Record MERGE_ACTION_LARGEFILE_MARK_REMOVED in both dirstates
    before delegating to the original recordupdates."""
    if MERGE_ACTION_LARGEFILE_MARK_REMOVED in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        with lfdirstate.parentchange():
            for lfile, args, msg in actions[
                MERGE_ACTION_LARGEFILE_MARK_REMOVED
            ]:
                # this should be executed before 'orig', to execute 'remove'
                # before all other actions
                repo.dirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=False
                )
                # make sure lfile doesn't get synclfdirstate'd as normal
                lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
        lfdirstate.write()

    return orig(repo, actions, branchmerge, getfiledata)
665 665
666 666
667 667 # Override filemerge to prompt the user about how they wish to merge
668 668 # largefiles. This will handle identical edits without prompting the user.
@eh.wrapfunction(filemerge, b'_filemerge')
def overridefilemerge(
    origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=None
):
    """Merge largefile standins by comparing the hashes they contain.

    Non-standins (and merges where either side is absent) fall through to
    the normal file merge.  For standins: if only the other side changed
    the largefile, take it silently; if both sides changed it differently,
    prompt the user to keep local or take other.
    """
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(
            premerge, repo, wctx, mynode, orig, fcd, fco, fca, labels=labels
        )

    # standin contents are the largefile hashes; compare those
    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
    if (
        ohash != ahash
        and ohash != dhash
        and (
            dhash == ahash
            or repo.ui.promptchoice(
                _(
                    b'largefile %s has a merge conflict\nancestor was %s\n'
                    b'you can keep (l)ocal %s or take (o)ther %s.\n'
                    b'what do you want to do?'
                    b'$$ &Local $$ &Other'
                )
                % (lfutil.splitstandin(orig), ahash, dhash, ohash),
                0,
            )
            == 1
        )
    ):
        # take the other side: write its standin content over ours
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False
701 701
702 702
@eh.wrapfunction(copiesmod, b'pathcopies')
def copiespathcopies(orig, ctx1, ctx2, match=None):
    """Return the copy table with standin names mapped back to their
    largefile names on both sides."""
    unstand = lambda name: lfutil.splitstandin(name) or name
    copies = orig(ctx1, ctx2, match=match)
    return {
        unstand(src): unstand(dst) for src, dst in pycompat.iteritems(copies)
    }
712 712
713 713
714 714 # Copy first changes the matchers to match standins instead of
715 715 # largefiles. Then it overrides util.copyfile in that function it
716 716 # checks if the destination largefile already exists. It also keeps a
717 717 # list of copied files so that the largefiles can be copied and the
718 718 # dirstate updated.
@eh.wrapfunction(cmdutil, b'copy')
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    """Copy/rename handling largefiles and normal files in two passes.

    The original copy is run first restricted to normal files, then again
    restricted to standins; the matching largefiles themselves are then
    copied/renamed and the largefiles dirstate updated.  Aborts with
    'no files to copy' only when neither pass matched anything.
    """
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    manifest = repo[None].manifest()

    def normalfilesmatchfn(
        orig,
        ctx,
        pats=(),
        opts=None,
        globbed=False,
        default=b'relpath',
        badfn=None,
    ):
        # pass 1 matcher: exclude anything tracked as a largefile
        if opts is None:
            opts = {}
        match = orig(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)

    with extensions.wrappedfunction(scmutil, b'match', normalfilesmatchfn):
        try:
            result = orig(ui, repo, pats, opts, rename)
        except error.Abort as e:
            if e.message != _(b'no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        # absolute path of the standin for a working-dir-relative path
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()

        def overridematch(
            orig,
            ctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            # pass 2 matcher: match only the standins of tracked largefiles
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, b''))
                else:
                    newpats.append(pat)
            match = orig(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (
                    lfile is not None
                    and (f in manifest)
                    and origmatchfn(lfile)
                    or None
                )

            m.matchfn = matchfn
            return m

        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        copiedfiles = []

        def overridecopyfile(orig, src, dest, *args, **kwargs):
            # refuse to clobber an existing largefile unless --force;
            # remember every (src, dest) pair for the largefile copy below
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                destlfile = dest.replace(lfutil.shortname, b'')
                if not opts[b'force'] and os.path.exists(destlfile):
                    raise IOError(
                        b'', _(b'destination largefile already exists')
                    )
            copiedfiles.append((src, dest))
            orig(src, dest, *args, **kwargs)

        with extensions.wrappedfunction(util, b'copyfile', overridecopyfile):
            with extensions.wrappedfunction(scmutil, b'match', overridematch):
                result += orig(ui, repo, listpats, opts, rename)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if lfutil.shortname in src and dest.startswith(
                repo.wjoin(lfutil.shortname)
            ):
                srclfile = src.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfile = dest.replace(repo.wjoin(lfutil.standin(b'')), b'')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or b'.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.set_untracked(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile), repo.wjoin(destlfile))

                lfdirstate.set_tracked(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if e.message != _(b'no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_(b'no files to copy'))

    return result
880 880
881 881
882 882 # When the user calls revert, we have to be careful to not revert any
883 883 # changes to other largefiles accidentally. This means we have to keep
884 884 # track of the largefiles that are being reverted so we only pull down
885 885 # the necessary largefiles.
886 886 #
887 887 # Standins are only updated (to match the hash of largefiles) before
888 888 # commits. Update the standins then run the original revert, changing
889 889 # the matcher to hit standins instead of largefiles. Based on the
890 890 # resulting standins update the largefiles.
@eh.wrapfunction(cmdutil, b'revert')
def overriderevert(orig, ui, repo, ctx, *pats, **opts):
    """Revert that syncs standins first, reverts them via the original
    implementation, then updates the affected largefiles (see the
    comment block above for the overall strategy)."""
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        # refresh standins of modified largefiles; drop standins of
        # deleted ones so the revert sees them as missing
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(
            orig,
            mctx,
            pats=(),
            opts=None,
            globbed=False,
            default=b'relpath',
            badfn=None,
        ):
            if opts is None:
                opts = {}
            match = orig(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(
                mctx.repo().ui, mctx.repo(), False
            )

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate.get_entry(f).removed:
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn

            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return origmatchfn(lfile) and (f in ctx or f in mctx)
                return origmatchfn(f)

            m.matchfn = matchfn
            return m

        with extensions.wrappedfunction(scmutil, b'match', overridematch):
            orig(ui, repo, ctx, *pats, **opts)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(
            ui, repo, filelist, printmessage=False, normallookup=True
        )
967 967
968 968
969 969 # after pulling changesets, we need to take some extra care to get
970 970 # largefiles updated remotely
@eh.wrapcommand(
    b'pull',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all pulled versions of largefiles (DEPRECATED)'),
        ),
        (
            b'',
            b'lfrev',
            [],
            _(b'download largefiles for these revisions'),
            _(b'REV'),
        ),
    ],
)
def overridepull(orig, ui, repo, source=None, **opts):
    """Pull, then pre-cache largefiles for the revisions selected by
    --lfrev (plus 'pulled()' when --all-largefiles is given)."""
    nbefore = len(repo)
    if not source:
        source = b'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    nafter = len(repo)

    wanted = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        wanted.append(b'pulled()')

    if wanted and nafter > nbefore:
        cached_count = 0
        # the pulled() revset needs to know where this pull started
        repo.firstpulled = nbefore
        try:
            for rev in logcmdutil.revrange(repo, wanted):
                ui.note(_(b'pulling largefiles for revision %d\n') % rev)
                cached, missing = lfcommands.cachelfiles(ui, repo, rev)
                cached_count += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_(b"%d largefiles cached\n") % cached_count)
    return result
1011 1011
1012 1012
@eh.wrapcommand(
    b'push',
    opts=[
        (
            b'',
            b'lfrev',
            [],
            _(b'upload largefiles for these revisions'),
            _(b'REV'),
        )
    ],
)
def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs[b'lfrevs'] = logcmdutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)
1032 1032
1033 1033
@eh.wrapfunction(exchange, b'pushoperation')
def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    # strip our private kwarg before the real constructor sees it
    lfrevs = kwargs.pop('lfrevs', None)
    push_op = orig(*args, **kwargs)
    push_op.lfrevs = lfrevs
    return push_op
1041 1041
1042 1042
@eh.revsetpredicate(b'pulled()')
def pulledrevsetsymbol(repo, subset, x):
    """Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

        Some examples:

        - pull largefiles for all new changesets::

            hg pull -lfrev "pulled()"

        - pull largefiles for all new branch heads::

            hg pull -lfrev "head(pulled()) and not closed()"

    """

    # repo.firstpulled is only set while overridepull is caching largefiles
    firstpulled = getattr(repo, 'firstpulled', None)
    if firstpulled is None:
        raise error.Abort(_(b"pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])
1068 1068
1069 1069
@eh.wrapcommand(
    b'clone',
    opts=[
        (
            b'',
            b'all-largefiles',
            None,
            _(b'download all versions of all largefiles'),
        )
    ],
)
def overrideclone(orig, ui, source, dest=None, **opts):
    """Reject --all-largefiles for non-local destinations, then clone."""
    destpath = dest if dest is not None else hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(destpath):
        raise error.Abort(
            _(b'--all-largefiles is incompatible with non-local destination %s')
            % destpath
        )

    return orig(ui, source, dest, **opts)
1092 1092
1093 1093
@eh.wrapfunction(hg, b'clone')
def hgclone(orig, ui, opts, *args, **kwargs):
    """After a clone, optionally download all largefiles (--all-largefiles).

    Returns the original (sourcerepo, destrepo) result, or None when
    --all-largefiles was requested and some largefiles were missing.
    """
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # Caching is implicitly limited to 'rev' option, since the dest repo was
        # truncated at that point. The user may expect a download count with
        # this option, so attempt whether or not this is a largefile repo.
        if opts.get(b'all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo)

            if missing != 0:
                return None

    return result
1118 1118
1119 1119
@eh.wrapcommand(b'rebase', extension=b'rebase')
def overriderebasecmd(orig, ui, repo, **opts):
    """Rebase with largefile-aware commit hooks and in-memory merge
    forced off for the duration of the command."""
    if not util.safehasattr(repo, b'_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    # append a no-op status writer for the duration of the rebase
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        with ui.configoverride(
            {(b'rebase', b'experimental.inmemory'): False}, b"largefiles"
        ):
            return orig(ui, repo, **opts)
    finally:
        # always restore the hook/writer stacks, even on error
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
1136 1136
1137 1137
@eh.extsetup
def overriderebase(ui):
    """If the rebase extension is loaded, force its _dorebase to run
    on disk (inmemory=False)."""
    try:
        rebase = extensions.find(b'rebase')
    except KeyError:
        # rebase extension not enabled; nothing to wrap
        return

    def _dorebase(orig, *args, **kwargs):
        kwargs['inmemory'] = False
        return orig(*args, **kwargs)

    extensions.wrapfunction(rebase, b'_dorebase', _dorebase)
1151 1151
1152 1152
@eh.wrapcommand(b'archive')
def overridearchivecmd(orig, ui, repo, dest, **opts):
    """Run archive on the unfiltered repo with largefile status enabled."""
    unfiltered = repo.unfiltered()
    with lfstatus(unfiltered):
        return orig(ui, unfiltered, dest, **opts)
1157 1157
1158 1158
@eh.wrapfunction(webcommands, b'archive')
def hgwebarchive(orig, web):
    """Serve hgweb archive requests with largefile status enabled."""
    repo = web.repo
    with lfstatus(repo):
        return orig(web)
1163 1163
1164 1164
@eh.wrapfunction(archival, b'archive')
def overridearchive(
    orig,
    repo,
    dest,
    node,
    kind,
    decode=True,
    match=None,
    prefix=b'',
    mtime=None,
    subrepos=None,
):
    """Archive with standins replaced by the largefile contents they
    reference.  Falls back to the original implementation when lfstatus
    is not set."""
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(
            repo, dest, node, kind, decode, match, prefix, mtime, subrepos
        )

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_(b"unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == b'files':
        if prefix:
            raise error.Abort(_(b'cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # skip entries the caller's matcher rejects; decode if requested
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool(b"ui", b"archivemeta"):
        write(
            b'.hg_archival.txt',
            0o644,
            False,
            lambda: archival.buildmetadata(ctx),
        )

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # standin: archive the largefile contents under the largefile name
            if node is not None:
                # standin content is the largefile hash; look it up in the
                # store/cache
                path = lfutil.findfile(repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(path)
        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.workingsub(subpath)
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = prefix + subpath + b'/'

            # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
            # infer and possibly set lfstatus in hgsubrepoarchive. That would
            # allow only hgsubrepos to set this, instead of the current scheme
            # where the parent sets this for the child.
            with (
                util.safehasattr(sub, '_repo')
                and lfstatus(sub._repo)
                or util.nullcontextmanager()
            ):
                sub.archive(archiver, subprefix, submatch)

    archiver.done()
1260 1260
1261 1261
@eh.wrapfunction(subrepo.hgsubrepo, b'archive')
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    """Archive an hg subrepo with standins replaced by the largefile
    contents they reference (mirrors overridearchive above).

    Falls back to the original implementation when largefiles is not
    enabled for the subrepo or lfstatus is not set.
    """
    lfenabled = util.safehasattr(repo._repo, b'_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + (b'hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile name,
        # so the normal matcher works here without the lfutil variants.
        # Fix: test the 'name' parameter rather than closing over the loop
        # variable 'f' (they happen to be equal at the current call sites,
        # but the late-binding closure was fragile and inconsistent with
        # overridearchive's write above).
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            # standin: archive the largefile contents under the largefile name
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _(
                            b'largefile %s not found in repo store or system cache'
                        )
                        % lfile
                    )
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, b'x' in ff and 0o755 or 0o644, b'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = prefix + subpath + b'/'
        # TODO: Only hgsubrepo instances have `_repo`, so figure out how to
        # infer and possibly set lfstatus at the top of this function. That
        # would allow only hgsubrepos to set this, instead of the current scheme
        # where the parent sets this for the child.
        with (
            util.safehasattr(sub, '_repo')
            and lfstatus(sub._repo)
            or util.nullcontextmanager()
        ):
            sub.archive(archiver, subprefix, submatch, decode)
1324 1324
1325 1325
1326 1326 # If a largefile is modified, the change is not reflected in its
1327 1327 # standin until a commit. cmdutil.bailifchanged() raises an exception
1328 1328 # if the repo has uncommitted changes. Wrap it to also check if
1329 1329 # largefiles were changed. This is used by bisect, backout and fetch.
@eh.wrapfunction(cmdutil, b'bailifchanged')
def overridebailifchanged(orig, repo, *args, **kwargs):
    """bailifchanged that also aborts on uncommitted largefile changes."""
    orig(repo, *args, **kwargs)
    with lfstatus(repo):
        status = repo.status()
    if status.modified or status.added or status.removed or status.deleted:
        raise error.Abort(_(b'uncommitted changes'))
1337 1337
1338 1338
@eh.wrapfunction(cmdutil, b'postcommitstatus')
def postcommitstatus(orig, repo, *args, **kwargs):
    """Compute post-commit status with largefile status enabled."""
    with lfstatus(repo):
        result = orig(repo, *args, **kwargs)
    return result
1343 1343
1344 1344
@eh.wrapfunction(cmdutil, b'forget')
def cmdutilforget(
    orig, ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Forget that handles largefiles: normal files go through the
    original implementation, matched largefiles are untracked in the
    largefiles dirstate and their standins forgotten.

    Returns the combined (bad, forgot) lists.
    """
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(
        ui,
        repo,
        normalmatcher,
        prefix,
        uipathfn,
        explicitonly,
        dryrun,
        interactive,
    )
    m = composelargefilematcher(match, repo[None].manifest())

    with lfstatus(repo):
        s = repo.status(match=m, clean=True)
    manifest = repo[None].manifest()
    # only forget files whose standin is actually tracked
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(
                _(b'not removing %s: file is already untracked\n') % uipathfn(f)
            )
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_(b'removing %s\n') % uipathfn(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            lfdirstate.set_untracked(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
1395 1395
1396 1396
1397 1397 def _getoutgoings(repo, other, missing, addfunc):
1398 1398 """get pairs of filename and largefile hash in outgoing revisions
1399 1399 in 'missing'.
1400 1400
1401 1401 largefiles already existing on 'other' repository are ignored.
1402 1402
1403 1403 'addfunc' is invoked with each unique pairs of filename and
1404 1404 largefile hash value.
1405 1405 """
1406 1406 knowns = set()
1407 1407 lfhashes = set()
1408 1408
1409 1409 def dedup(fn, lfhash):
1410 1410 k = (fn, lfhash)
1411 1411 if k not in knowns:
1412 1412 knowns.add(k)
1413 1413 lfhashes.add(lfhash)
1414 1414
1415 1415 lfutil.getlfilestoupload(repo, missing, dedup)
1416 1416 if lfhashes:
1417 1417 lfexists = storefactory.openstore(repo, other).exists(lfhashes)
1418 1418 for fn, lfhash in knowns:
1419 1419 if not lfexists[lfhash]: # lfhash doesn't exist on "other"
1420 1420 addfunc(fn, lfhash)
1421 1421
1422 1422
1423 1423 def outgoinghook(ui, repo, other, opts, missing):
1424 1424 if opts.pop(b'large', None):
1425 1425 lfhashes = set()
1426 1426 if ui.debugflag:
1427 1427 toupload = {}
1428 1428
1429 1429 def addfunc(fn, lfhash):
1430 1430 if fn not in toupload:
1431 1431 toupload[fn] = []
1432 1432 toupload[fn].append(lfhash)
1433 1433 lfhashes.add(lfhash)
1434 1434
1435 1435 def showhashes(fn):
1436 1436 for lfhash in sorted(toupload[fn]):
1437 1437 ui.debug(b' %s\n' % lfhash)
1438 1438
1439 1439 else:
1440 1440 toupload = set()
1441 1441
1442 1442 def addfunc(fn, lfhash):
1443 1443 toupload.add(fn)
1444 1444 lfhashes.add(lfhash)
1445 1445
1446 1446 def showhashes(fn):
1447 1447 pass
1448 1448
1449 1449 _getoutgoings(repo, other, missing, addfunc)
1450 1450
1451 1451 if not toupload:
1452 1452 ui.status(_(b'largefiles: no files to upload\n'))
1453 1453 else:
1454 1454 ui.status(
1455 1455 _(b'largefiles to upload (%d entities):\n') % (len(lfhashes))
1456 1456 )
1457 1457 for file in sorted(toupload):
1458 1458 ui.status(lfutil.splitstandin(file) + b'\n')
1459 1459 showhashes(file)
1460 1460 ui.status(b'\n')
1461 1461
1462 1462
1463 1463 @eh.wrapcommand(
1464 1464 b'outgoing', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1465 1465 )
1466 1466 def _outgoingcmd(orig, *args, **kwargs):
1467 1467 # Nothing to do here other than add the extra help option- the hook above
1468 1468 # processes it.
1469 1469 return orig(*args, **kwargs)
1470 1470
1471 1471
1472 1472 def summaryremotehook(ui, repo, opts, changes):
1473 1473 largeopt = opts.get(b'large', False)
1474 1474 if changes is None:
1475 1475 if largeopt:
1476 1476 return (False, True) # only outgoing check is needed
1477 1477 else:
1478 1478 return (False, False)
1479 1479 elif largeopt:
1480 1480 url, branch, peer, outgoing = changes[1]
1481 1481 if peer is None:
1482 1482 # i18n: column positioning for "hg summary"
1483 1483 ui.status(_(b'largefiles: (no remote repo)\n'))
1484 1484 return
1485 1485
1486 1486 toupload = set()
1487 1487 lfhashes = set()
1488 1488
1489 1489 def addfunc(fn, lfhash):
1490 1490 toupload.add(fn)
1491 1491 lfhashes.add(lfhash)
1492 1492
1493 1493 _getoutgoings(repo, peer, outgoing.missing, addfunc)
1494 1494
1495 1495 if not toupload:
1496 1496 # i18n: column positioning for "hg summary"
1497 1497 ui.status(_(b'largefiles: (no files to upload)\n'))
1498 1498 else:
1499 1499 # i18n: column positioning for "hg summary"
1500 1500 ui.status(
1501 1501 _(b'largefiles: %d entities for %d files to upload\n')
1502 1502 % (len(lfhashes), len(toupload))
1503 1503 )
1504 1504
1505 1505
1506 1506 @eh.wrapcommand(
1507 1507 b'summary', opts=[(b'', b'large', None, _(b'display outgoing largefiles'))]
1508 1508 )
1509 1509 def overridesummary(orig, ui, repo, *pats, **opts):
1510 1510 with lfstatus(repo):
1511 1511 orig(ui, repo, *pats, **opts)
1512 1512
1513 1513
1514 1514 @eh.wrapfunction(scmutil, b'addremove')
1515 1515 def scmutiladdremove(orig, repo, matcher, prefix, uipathfn, opts=None):
1516 1516 if opts is None:
1517 1517 opts = {}
1518 1518 if not lfutil.islfilesrepo(repo):
1519 1519 return orig(repo, matcher, prefix, uipathfn, opts)
1520 1520 # Get the list of missing largefiles so we can remove them
1521 1521 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1522 1522 unsure, s = lfdirstate.status(
1523 1523 matchmod.always(),
1524 1524 subrepos=[],
1525 1525 ignored=False,
1526 1526 clean=False,
1527 1527 unknown=False,
1528 1528 )
1529 1529
1530 1530 # Call into the normal remove code, but the removing of the standin, we want
1531 1531 # to have handled by original addremove. Monkey patching here makes sure
1532 1532 # we don't remove the standin in the largefiles code, preventing a very
1533 1533 # confused state later.
1534 1534 if s.deleted:
1535 1535 m = copy.copy(matcher)
1536 1536
1537 1537 # The m._files and m._map attributes are not changed to the deleted list
1538 1538 # because that affects the m.exact() test, which in turn governs whether
1539 1539 # or not the file name is printed, and how. Simply limit the original
1540 1540 # matches to those in the deleted status list.
1541 1541 matchfn = m.matchfn
1542 1542 m.matchfn = lambda f: f in s.deleted and matchfn(f)
1543 1543
1544 1544 removelargefiles(
1545 1545 repo.ui,
1546 1546 repo,
1547 1547 True,
1548 1548 m,
1549 1549 uipathfn,
1550 1550 opts.get(b'dry_run'),
1551 1551 **pycompat.strkwargs(opts)
1552 1552 )
1553 1553 # Call into the normal add code, and any files that *should* be added as
1554 1554 # largefiles will be
1555 1555 added, bad = addlargefiles(
1556 1556 repo.ui, repo, True, matcher, uipathfn, **pycompat.strkwargs(opts)
1557 1557 )
1558 1558 # Now that we've handled largefiles, hand off to the original addremove
1559 1559 # function to take care of the rest. Make sure it doesn't do anything with
1560 1560 # largefiles by passing a matcher that will ignore them.
1561 1561 matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
1562 1562 return orig(repo, matcher, prefix, uipathfn, opts)
1563 1563
1564 1564
1565 1565 # Calling purge with --all will cause the largefiles to be deleted.
1566 1566 # Override repo.status to prevent this from happening.
1567 1567 @eh.wrapcommand(b'purge')
1568 1568 def overridepurge(orig, ui, repo, *dirs, **opts):
1569 1569 # XXX Monkey patching a repoview will not work. The assigned attribute will
1570 1570 # be set on the unfiltered repo, but we will only lookup attributes in the
1571 1571 # unfiltered repo if the lookup in the repoview object itself fails. As the
1572 1572 # monkey patched method exists on the repoview class the lookup will not
1573 1573 # fail. As a result, the original version will shadow the monkey patched
1574 1574 # one, defeating the monkey patch.
1575 1575 #
1576 1576 # As a work around we use an unfiltered repo here. We should do something
1577 1577 # cleaner instead.
1578 1578 repo = repo.unfiltered()
1579 1579 oldstatus = repo.status
1580 1580
1581 1581 def overridestatus(
1582 1582 node1=b'.',
1583 1583 node2=None,
1584 1584 match=None,
1585 1585 ignored=False,
1586 1586 clean=False,
1587 1587 unknown=False,
1588 1588 listsubrepos=False,
1589 1589 ):
1590 1590 r = oldstatus(
1591 1591 node1, node2, match, ignored, clean, unknown, listsubrepos
1592 1592 )
1593 1593 lfdirstate = lfutil.openlfdirstate(ui, repo)
1594 1594 unknown = [
1595 1595 f for f in r.unknown if not lfdirstate.get_entry(f).any_tracked
1596 1596 ]
1597 1597 ignored = [
1598 1598 f for f in r.ignored if not lfdirstate.get_entry(f).any_tracked
1599 1599 ]
1600 1600 return scmutil.status(
1601 1601 r.modified, r.added, r.removed, r.deleted, unknown, ignored, r.clean
1602 1602 )
1603 1603
1604 1604 repo.status = overridestatus
1605 1605 orig(ui, repo, *dirs, **opts)
1606 1606 repo.status = oldstatus
1607 1607
1608 1608
1609 1609 @eh.wrapcommand(b'rollback')
1610 1610 def overriderollback(orig, ui, repo, **opts):
1611 1611 with repo.wlock():
1612 1612 before = repo.dirstate.parents()
1613 1613 orphans = {
1614 1614 f
1615 1615 for f in repo.dirstate
1616 1616 if lfutil.isstandin(f) and not repo.dirstate.get_entry(f).removed
1617 1617 }
1618 1618 result = orig(ui, repo, **opts)
1619 1619 after = repo.dirstate.parents()
1620 1620 if before == after:
1621 1621 return result # no need to restore standins
1622 1622
1623 1623 pctx = repo[b'.']
1624 1624 for f in repo.dirstate:
1625 1625 if lfutil.isstandin(f):
1626 1626 orphans.discard(f)
1627 1627 if repo.dirstate.get_entry(f).removed:
1628 1628 repo.wvfs.unlinkpath(f, ignoremissing=True)
1629 1629 elif f in pctx:
1630 1630 fctx = pctx[f]
1631 1631 repo.wwrite(f, fctx.data(), fctx.flags())
1632 1632 else:
1633 1633 # content of standin is not so important in 'a',
1634 1634 # 'm' or 'n' (coming from the 2nd parent) cases
1635 1635 lfutil.writestandin(repo, f, b'', False)
1636 1636 for standin in orphans:
1637 1637 repo.wvfs.unlinkpath(standin, ignoremissing=True)
1638 1638
1639 1639 lfdirstate = lfutil.openlfdirstate(ui, repo)
1640 1640 with lfdirstate.parentchange():
1641 1641 orphans = set(lfdirstate)
1642 1642 lfiles = lfutil.listlfiles(repo)
1643 1643 for file in lfiles:
1644 1644 lfutil.synclfdirstate(repo, lfdirstate, file, True)
1645 1645 orphans.discard(file)
1646 1646 for lfile in orphans:
1647 1647 lfdirstate.update_file(
1648 1648 lfile, p1_tracked=False, wc_tracked=False
1649 1649 )
1650 1650 lfdirstate.write()
1651 1651 return result
1652 1652
1653 1653
1654 1654 @eh.wrapcommand(b'transplant', extension=b'transplant')
1655 1655 def overridetransplant(orig, ui, repo, *revs, **opts):
1656 1656 resuming = opts.get('continue')
1657 1657 repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
1658 1658 repo._lfstatuswriters.append(lambda *msg, **opts: None)
1659 1659 try:
1660 1660 result = orig(ui, repo, *revs, **opts)
1661 1661 finally:
1662 1662 repo._lfstatuswriters.pop()
1663 1663 repo._lfcommithooks.pop()
1664 1664 return result
1665 1665
1666 1666
1667 1667 @eh.wrapcommand(b'cat')
1668 1668 def overridecat(orig, ui, repo, file1, *pats, **opts):
1669 1669 opts = pycompat.byteskwargs(opts)
1670 ctx = scmutil.revsingle(repo, opts.get(b'rev'))
1670 ctx = logcmdutil.revsingle(repo, opts.get(b'rev'))
1671 1671 err = 1
1672 1672 notbad = set()
1673 1673 m = scmutil.match(ctx, (file1,) + pats, opts)
1674 1674 origmatchfn = m.matchfn
1675 1675
1676 1676 def lfmatchfn(f):
1677 1677 if origmatchfn(f):
1678 1678 return True
1679 1679 lf = lfutil.splitstandin(f)
1680 1680 if lf is None:
1681 1681 return False
1682 1682 notbad.add(lf)
1683 1683 return origmatchfn(lf)
1684 1684
1685 1685 m.matchfn = lfmatchfn
1686 1686 origbadfn = m.bad
1687 1687
1688 1688 def lfbadfn(f, msg):
1689 1689 if not f in notbad:
1690 1690 origbadfn(f, msg)
1691 1691
1692 1692 m.bad = lfbadfn
1693 1693
1694 1694 origvisitdirfn = m.visitdir
1695 1695
1696 1696 def lfvisitdirfn(dir):
1697 1697 if dir == lfutil.shortname:
1698 1698 return True
1699 1699 ret = origvisitdirfn(dir)
1700 1700 if ret:
1701 1701 return ret
1702 1702 lf = lfutil.splitstandin(dir)
1703 1703 if lf is None:
1704 1704 return False
1705 1705 return origvisitdirfn(lf)
1706 1706
1707 1707 m.visitdir = lfvisitdirfn
1708 1708
1709 1709 for f in ctx.walk(m):
1710 1710 with cmdutil.makefileobj(ctx, opts.get(b'output'), pathname=f) as fp:
1711 1711 lf = lfutil.splitstandin(f)
1712 1712 if lf is None or origmatchfn(f):
1713 1713 # duplicating unreachable code from commands.cat
1714 1714 data = ctx[f].data()
1715 1715 if opts.get(b'decode'):
1716 1716 data = repo.wwritedata(f, data)
1717 1717 fp.write(data)
1718 1718 else:
1719 1719 hash = lfutil.readasstandin(ctx[f])
1720 1720 if not lfutil.inusercache(repo.ui, hash):
1721 1721 store = storefactory.openstore(repo)
1722 1722 success, missing = store.get([(lf, hash)])
1723 1723 if len(success) != 1:
1724 1724 raise error.Abort(
1725 1725 _(
1726 1726 b'largefile %s is not in cache and could not be '
1727 1727 b'downloaded'
1728 1728 )
1729 1729 % lf
1730 1730 )
1731 1731 path = lfutil.usercachepath(repo.ui, hash)
1732 1732 with open(path, b"rb") as fpin:
1733 1733 for chunk in util.filechunkiter(fpin):
1734 1734 fp.write(chunk)
1735 1735 err = 0
1736 1736 return err
1737 1737
1738 1738
1739 1739 @eh.wrapfunction(merge, b'_update')
1740 1740 def mergeupdate(orig, repo, node, branchmerge, force, *args, **kwargs):
1741 1741 matcher = kwargs.get('matcher', None)
1742 1742 # note if this is a partial update
1743 1743 partial = matcher and not matcher.always()
1744 1744 with repo.wlock():
1745 1745 # branch | | |
1746 1746 # merge | force | partial | action
1747 1747 # -------+-------+---------+--------------
1748 1748 # x | x | x | linear-merge
1749 1749 # o | x | x | branch-merge
1750 1750 # x | o | x | overwrite (as clean update)
1751 1751 # o | o | x | force-branch-merge (*1)
1752 1752 # x | x | o | (*)
1753 1753 # o | x | o | (*)
1754 1754 # x | o | o | overwrite (as revert)
1755 1755 # o | o | o | (*)
1756 1756 #
1757 1757 # (*) don't care
1758 1758 # (*1) deprecated, but used internally (e.g: "rebase --collapse")
1759 1759
1760 1760 lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
1761 1761 unsure, s = lfdirstate.status(
1762 1762 matchmod.always(),
1763 1763 subrepos=[],
1764 1764 ignored=False,
1765 1765 clean=True,
1766 1766 unknown=False,
1767 1767 )
1768 1768 oldclean = set(s.clean)
1769 1769 pctx = repo[b'.']
1770 1770 dctx = repo[node]
1771 1771 for lfile in unsure + s.modified:
1772 1772 lfileabs = repo.wvfs.join(lfile)
1773 1773 if not repo.wvfs.exists(lfileabs):
1774 1774 continue
1775 1775 lfhash = lfutil.hashfile(lfileabs)
1776 1776 standin = lfutil.standin(lfile)
1777 1777 lfutil.writestandin(
1778 1778 repo, standin, lfhash, lfutil.getexecutable(lfileabs)
1779 1779 )
1780 1780 if standin in pctx and lfhash == lfutil.readasstandin(
1781 1781 pctx[standin]
1782 1782 ):
1783 1783 oldclean.add(lfile)
1784 1784 for lfile in s.added:
1785 1785 fstandin = lfutil.standin(lfile)
1786 1786 if fstandin not in dctx:
1787 1787 # in this case, content of standin file is meaningless
1788 1788 # (in dctx, lfile is unknown, or normal file)
1789 1789 continue
1790 1790 lfutil.updatestandin(repo, lfile, fstandin)
1791 1791 # mark all clean largefiles as dirty, just in case the update gets
1792 1792 # interrupted before largefiles and lfdirstate are synchronized
1793 1793 for lfile in oldclean:
1794 1794 lfdirstate.set_possibly_dirty(lfile)
1795 1795 lfdirstate.write()
1796 1796
1797 1797 oldstandins = lfutil.getstandinsstate(repo)
1798 1798 wc = kwargs.get('wc')
1799 1799 if wc and wc.isinmemory():
1800 1800 # largefiles is not a good candidate for in-memory merge (large
1801 1801 # files, custom dirstate, matcher usage).
1802 1802 raise error.ProgrammingError(
1803 1803 b'largefiles is not compatible with in-memory merge'
1804 1804 )
1805 1805 with lfdirstate.parentchange():
1806 1806 result = orig(repo, node, branchmerge, force, *args, **kwargs)
1807 1807
1808 1808 newstandins = lfutil.getstandinsstate(repo)
1809 1809 filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
1810 1810
1811 1811 # to avoid leaving all largefiles as dirty and thus rehash them, mark
1812 1812 # all the ones that didn't change as clean
1813 1813 for lfile in oldclean.difference(filelist):
1814 1814 lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
1815 1815 lfdirstate.write()
1816 1816
1817 1817 if branchmerge or force or partial:
1818 1818 filelist.extend(s.deleted + s.removed)
1819 1819
1820 1820 lfcommands.updatelfiles(
1821 1821 repo.ui, repo, filelist=filelist, normallookup=partial
1822 1822 )
1823 1823
1824 1824 return result
1825 1825
1826 1826
1827 1827 @eh.wrapfunction(scmutil, b'marktouched')
1828 1828 def scmutilmarktouched(orig, repo, files, *args, **kwargs):
1829 1829 result = orig(repo, files, *args, **kwargs)
1830 1830
1831 1831 filelist = []
1832 1832 for f in files:
1833 1833 lf = lfutil.splitstandin(f)
1834 1834 if lf is not None:
1835 1835 filelist.append(lf)
1836 1836 if filelist:
1837 1837 lfcommands.updatelfiles(
1838 1838 repo.ui,
1839 1839 repo,
1840 1840 filelist=filelist,
1841 1841 printmessage=False,
1842 1842 normallookup=True,
1843 1843 )
1844 1844
1845 1845 return result
1846 1846
1847 1847
1848 1848 @eh.wrapfunction(upgrade_actions, b'preservedrequirements')
1849 1849 @eh.wrapfunction(upgrade_actions, b'supporteddestrequirements')
1850 1850 def upgraderequirements(orig, repo):
1851 1851 reqs = orig(repo)
1852 1852 if b'largefiles' in repo.requirements:
1853 1853 reqs.add(b'largefiles')
1854 1854 return reqs
1855 1855
1856 1856
1857 1857 _lfscheme = b'largefile://'
1858 1858
1859 1859
1860 1860 @eh.wrapfunction(urlmod, b'open')
1861 1861 def openlargefile(orig, ui, url_, data=None, **kwargs):
1862 1862 if url_.startswith(_lfscheme):
1863 1863 if data:
1864 1864 msg = b"cannot use data on a 'largefile://' url"
1865 1865 raise error.ProgrammingError(msg)
1866 1866 lfid = url_[len(_lfscheme) :]
1867 1867 return storefactory.getlfile(ui, lfid)
1868 1868 else:
1869 1869 return orig(ui, url_, data=data, **kwargs)
@@ -1,2288 +1,2288 b''
1 1 # rebase.py - rebasing feature for mercurial
2 2 #
3 3 # Copyright 2008 Stefano Tortarolo <stefano.tortarolo at gmail dot com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to move sets of revisions to a different ancestor
9 9
10 10 This extension lets you rebase changesets in an existing Mercurial
11 11 repository.
12 12
13 13 For more information:
14 14 https://mercurial-scm.org/wiki/RebaseExtension
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 import errno
20 20 import os
21 21
22 22 from mercurial.i18n import _
23 23 from mercurial.node import (
24 24 nullrev,
25 25 short,
26 26 wdirrev,
27 27 )
28 28 from mercurial.pycompat import open
29 29 from mercurial import (
30 30 bookmarks,
31 31 cmdutil,
32 32 commands,
33 33 copies,
34 34 destutil,
35 35 dirstateguard,
36 36 error,
37 37 extensions,
38 38 logcmdutil,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 mergeutil,
42 42 obsolete,
43 43 obsutil,
44 44 patch,
45 45 phases,
46 46 pycompat,
47 47 registrar,
48 48 repair,
49 49 revset,
50 50 revsetlang,
51 51 rewriteutil,
52 52 scmutil,
53 53 smartset,
54 54 state as statemod,
55 55 util,
56 56 )
57 57
58 58 # The following constants are used throughout the rebase module. The ordering of
59 59 # their values must be maintained.
60 60
61 61 # Indicates that a revision needs to be rebased
62 62 revtodo = -1
63 63 revtodostr = b'-1'
64 64
65 65 # legacy revstates no longer needed in current code
66 66 # -2: nullmerge, -3: revignored, -4: revprecursor, -5: revpruned
67 67 legacystates = {b'-2', b'-3', b'-4', b'-5'}
68 68
69 69 cmdtable = {}
70 70 command = registrar.command(cmdtable)
71 71
72 72 configtable = {}
73 73 configitem = registrar.configitem(configtable)
74 74 configitem(
75 75 b'devel',
76 76 b'rebase.force-in-memory-merge',
77 77 default=False,
78 78 )
79 79 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
80 80 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
81 81 # be specifying the version(s) of Mercurial they are tested with, or
82 82 # leave the attribute unspecified.
83 83 testedwith = b'ships-with-hg-core'
84 84
85 85
86 86 def _nothingtorebase():
87 87 return 1
88 88
89 89
90 90 def _savegraft(ctx, extra):
91 91 s = ctx.extra().get(b'source', None)
92 92 if s is not None:
93 93 extra[b'source'] = s
94 94 s = ctx.extra().get(b'intermediate-source', None)
95 95 if s is not None:
96 96 extra[b'intermediate-source'] = s
97 97
98 98
99 99 def _savebranch(ctx, extra):
100 100 extra[b'branch'] = ctx.branch()
101 101
102 102
103 103 def _destrebase(repo, sourceset, destspace=None):
104 104 """small wrapper around destmerge to pass the right extra args
105 105
106 106 Please wrap destutil.destmerge instead."""
107 107 return destutil.destmerge(
108 108 repo,
109 109 action=b'rebase',
110 110 sourceset=sourceset,
111 111 onheadcheck=False,
112 112 destspace=destspace,
113 113 )
114 114
115 115
116 116 revsetpredicate = registrar.revsetpredicate()
117 117
118 118
119 119 @revsetpredicate(b'_destrebase')
120 120 def _revsetdestrebase(repo, subset, x):
121 121 # ``_rebasedefaultdest()``
122 122
123 123 # default destination for rebase.
124 124 # # XXX: Currently private because I expect the signature to change.
125 125 # # XXX: - bailing out in case of ambiguity vs returning all data.
126 126 # i18n: "_rebasedefaultdest" is a keyword
127 127 sourceset = None
128 128 if x is not None:
129 129 sourceset = revset.getset(repo, smartset.fullreposet(repo), x)
130 130 return subset & smartset.baseset([_destrebase(repo, sourceset)])
131 131
132 132
133 133 @revsetpredicate(b'_destautoorphanrebase')
134 134 def _revsetdestautoorphanrebase(repo, subset, x):
135 135 # ``_destautoorphanrebase()``
136 136
137 137 # automatic rebase destination for a single orphan revision.
138 138 unfi = repo.unfiltered()
139 139 obsoleted = unfi.revs(b'obsolete()')
140 140
141 141 src = revset.getset(repo, subset, x).first()
142 142
143 143 # Empty src or already obsoleted - Do not return a destination
144 144 if not src or src in obsoleted:
145 145 return smartset.baseset()
146 146 dests = destutil.orphanpossibledestination(repo, src)
147 147 if len(dests) > 1:
148 148 raise error.StateError(
149 149 _(b"ambiguous automatic rebase: %r could end up on any of %r")
150 150 % (src, dests)
151 151 )
152 152 # We have zero or one destination, so we can just return here.
153 153 return smartset.baseset(dests)
154 154
155 155
156 156 def _ctxdesc(ctx):
157 157 """short description for a context"""
158 158 return cmdutil.format_changeset_summary(
159 159 ctx.repo().ui, ctx, command=b'rebase'
160 160 )
161 161
162 162
163 163 class rebaseruntime(object):
164 164 """This class is a container for rebase runtime state"""
165 165
166 166 def __init__(self, repo, ui, inmemory=False, dryrun=False, opts=None):
167 167 if opts is None:
168 168 opts = {}
169 169
170 170 # prepared: whether we have rebasestate prepared or not. Currently it
171 171 # decides whether "self.repo" is unfiltered or not.
172 172 # The rebasestate has explicit hash to hash instructions not depending
173 173 # on visibility. If rebasestate exists (in-memory or on-disk), use
174 174 # unfiltered repo to avoid visibility issues.
175 175 # Before knowing rebasestate (i.e. when starting a new rebase (not
176 176 # --continue or --abort)), the original repo should be used so
177 177 # visibility-dependent revsets are correct.
178 178 self.prepared = False
179 179 self.resume = False
180 180 self._repo = repo
181 181
182 182 self.ui = ui
183 183 self.opts = opts
184 184 self.originalwd = None
185 185 self.external = nullrev
186 186 # Mapping between the old revision id and either what is the new rebased
187 187 # revision or what needs to be done with the old revision. The state
188 188 # dict will be what contains most of the rebase progress state.
189 189 self.state = {}
190 190 self.activebookmark = None
191 191 self.destmap = {}
192 192 self.skipped = set()
193 193
194 194 self.collapsef = opts.get('collapse', False)
195 195 self.collapsemsg = cmdutil.logmessage(ui, pycompat.byteskwargs(opts))
196 196 self.date = opts.get('date', None)
197 197
198 198 e = opts.get('extrafn') # internal, used by e.g. hgsubversion
199 199 self.extrafns = [_savegraft]
200 200 if e:
201 201 self.extrafns = [e]
202 202
203 203 self.backupf = ui.configbool(b'rewrite', b'backup-bundle')
204 204 self.keepf = opts.get('keep', False)
205 205 self.keepbranchesf = opts.get('keepbranches', False)
206 206 self.skipemptysuccessorf = rewriteutil.skip_empty_successor(
207 207 repo.ui, b'rebase'
208 208 )
209 209 self.obsolete_with_successor_in_destination = {}
210 210 self.obsolete_with_successor_in_rebase_set = set()
211 211 self.inmemory = inmemory
212 212 self.dryrun = dryrun
213 213 self.stateobj = statemod.cmdstate(repo, b'rebasestate')
214 214
215 215 @property
216 216 def repo(self):
217 217 if self.prepared:
218 218 return self._repo.unfiltered()
219 219 else:
220 220 return self._repo
221 221
222 222 def storestatus(self, tr=None):
223 223 """Store the current status to allow recovery"""
224 224 if tr:
225 225 tr.addfilegenerator(
226 226 b'rebasestate',
227 227 (b'rebasestate',),
228 228 self._writestatus,
229 229 location=b'plain',
230 230 )
231 231 else:
232 232 with self.repo.vfs(b"rebasestate", b"w") as f:
233 233 self._writestatus(f)
234 234
235 235 def _writestatus(self, f):
236 236 repo = self.repo
237 237 assert repo.filtername is None
238 238 f.write(repo[self.originalwd].hex() + b'\n')
239 239 # was "dest". we now write dest per src root below.
240 240 f.write(b'\n')
241 241 f.write(repo[self.external].hex() + b'\n')
242 242 f.write(b'%d\n' % int(self.collapsef))
243 243 f.write(b'%d\n' % int(self.keepf))
244 244 f.write(b'%d\n' % int(self.keepbranchesf))
245 245 f.write(b'%s\n' % (self.activebookmark or b''))
246 246 destmap = self.destmap
247 247 for d, v in pycompat.iteritems(self.state):
248 248 oldrev = repo[d].hex()
249 249 if v >= 0:
250 250 newrev = repo[v].hex()
251 251 else:
252 252 newrev = b"%d" % v
253 253 destnode = repo[destmap[d]].hex()
254 254 f.write(b"%s:%s:%s\n" % (oldrev, newrev, destnode))
255 255 repo.ui.debug(b'rebase status stored\n')
256 256
257 257 def restorestatus(self):
258 258 """Restore a previously stored status"""
259 259 if not self.stateobj.exists():
260 260 cmdutil.wrongtooltocontinue(self.repo, _(b'rebase'))
261 261
262 262 data = self._read()
263 263 self.repo.ui.debug(b'rebase status resumed\n')
264 264
265 265 self.originalwd = data[b'originalwd']
266 266 self.destmap = data[b'destmap']
267 267 self.state = data[b'state']
268 268 self.skipped = data[b'skipped']
269 269 self.collapsef = data[b'collapse']
270 270 self.keepf = data[b'keep']
271 271 self.keepbranchesf = data[b'keepbranches']
272 272 self.external = data[b'external']
273 273 self.activebookmark = data[b'activebookmark']
274 274
275 275 def _read(self):
276 276 self.prepared = True
277 277 repo = self.repo
278 278 assert repo.filtername is None
279 279 data = {
280 280 b'keepbranches': None,
281 281 b'collapse': None,
282 282 b'activebookmark': None,
283 283 b'external': nullrev,
284 284 b'keep': None,
285 285 b'originalwd': None,
286 286 }
287 287 legacydest = None
288 288 state = {}
289 289 destmap = {}
290 290
291 291 if True:
292 292 f = repo.vfs(b"rebasestate")
293 293 for i, l in enumerate(f.read().splitlines()):
294 294 if i == 0:
295 295 data[b'originalwd'] = repo[l].rev()
296 296 elif i == 1:
297 297 # this line should be empty in newer version. but legacy
298 298 # clients may still use it
299 299 if l:
300 300 legacydest = repo[l].rev()
301 301 elif i == 2:
302 302 data[b'external'] = repo[l].rev()
303 303 elif i == 3:
304 304 data[b'collapse'] = bool(int(l))
305 305 elif i == 4:
306 306 data[b'keep'] = bool(int(l))
307 307 elif i == 5:
308 308 data[b'keepbranches'] = bool(int(l))
309 309 elif i == 6 and not (len(l) == 81 and b':' in l):
310 310 # line 6 is a recent addition, so for backwards
311 311 # compatibility check that the line doesn't look like the
312 312 # oldrev:newrev lines
313 313 data[b'activebookmark'] = l
314 314 else:
315 315 args = l.split(b':')
316 316 oldrev = repo[args[0]].rev()
317 317 newrev = args[1]
318 318 if newrev in legacystates:
319 319 continue
320 320 if len(args) > 2:
321 321 destrev = repo[args[2]].rev()
322 322 else:
323 323 destrev = legacydest
324 324 destmap[oldrev] = destrev
325 325 if newrev == revtodostr:
326 326 state[oldrev] = revtodo
327 327 # Legacy compat special case
328 328 else:
329 329 state[oldrev] = repo[newrev].rev()
330 330
331 331 if data[b'keepbranches'] is None:
332 332 raise error.Abort(_(b'.hg/rebasestate is incomplete'))
333 333
334 334 data[b'destmap'] = destmap
335 335 data[b'state'] = state
336 336 skipped = set()
337 337 # recompute the set of skipped revs
338 338 if not data[b'collapse']:
339 339 seen = set(destmap.values())
340 340 for old, new in sorted(state.items()):
341 341 if new != revtodo and new in seen:
342 342 skipped.add(old)
343 343 seen.add(new)
344 344 data[b'skipped'] = skipped
345 345 repo.ui.debug(
346 346 b'computed skipped revs: %s\n'
347 347 % (b' '.join(b'%d' % r for r in sorted(skipped)) or b'')
348 348 )
349 349
350 350 return data
351 351
352 352 def _handleskippingobsolete(self):
353 353 """Compute structures necessary for skipping obsolete revisions"""
354 354 if self.keepf:
355 355 return
356 356 if not self.ui.configbool(b'experimental', b'rebaseskipobsolete'):
357 357 return
358 358 obsoleteset = {r for r in self.state if self.repo[r].obsolete()}
359 359 (
360 360 self.obsolete_with_successor_in_destination,
361 361 self.obsolete_with_successor_in_rebase_set,
362 362 ) = _compute_obsolete_sets(self.repo, obsoleteset, self.destmap)
363 363 skippedset = set(self.obsolete_with_successor_in_destination)
364 364 skippedset.update(self.obsolete_with_successor_in_rebase_set)
365 365 _checkobsrebase(self.repo, self.ui, obsoleteset, skippedset)
366 366 if obsolete.isenabled(self.repo, obsolete.allowdivergenceopt):
367 367 self.obsolete_with_successor_in_rebase_set = set()
368 368 else:
369 369 for rev in self.repo.revs(
370 370 b'descendants(%ld) and not %ld',
371 371 self.obsolete_with_successor_in_rebase_set,
372 372 self.obsolete_with_successor_in_rebase_set,
373 373 ):
374 374 self.state.pop(rev, None)
375 375 self.destmap.pop(rev, None)
376 376
377 377 def _prepareabortorcontinue(
378 378 self, isabort, backup=True, suppwarns=False, dryrun=False, confirm=False
379 379 ):
380 380 self.resume = True
381 381 try:
382 382 self.restorestatus()
383 383 # Calculate self.obsolete_* sets
384 384 self._handleskippingobsolete()
385 385 self.collapsemsg = restorecollapsemsg(self.repo, isabort)
386 386 except error.RepoLookupError:
387 387 if isabort:
388 388 clearstatus(self.repo)
389 389 clearcollapsemsg(self.repo)
390 390 self.repo.ui.warn(
391 391 _(
392 392 b'rebase aborted (no revision is removed,'
393 393 b' only broken state is cleared)\n'
394 394 )
395 395 )
396 396 return 0
397 397 else:
398 398 msg = _(b'cannot continue inconsistent rebase')
399 399 hint = _(b'use "hg rebase --abort" to clear broken state')
400 400 raise error.Abort(msg, hint=hint)
401 401
402 402 if isabort:
403 403 backup = backup and self.backupf
404 404 return self._abort(
405 405 backup=backup,
406 406 suppwarns=suppwarns,
407 407 dryrun=dryrun,
408 408 confirm=confirm,
409 409 )
410 410
    def _preparenewrebase(self, destmap):
        """Validate a fresh {srcrev: destrev} mapping and initialize state.

        Returns an exit code (via _nothingtorebase) when there is nothing to
        do, otherwise None after marking the runtime as prepared. Raises
        error.InputError for invalid --collapse usage and error.Abort when
        rewriteutil.precheck rejects the rebase set.
        """
        if not destmap:
            return _nothingtorebase()

        result = buildstate(self.repo, destmap, self.collapsef)

        if not result:
            # Empty state built, nothing to rebase
            self.ui.status(_(b'nothing to rebase\n'))
            return _nothingtorebase()

        (self.originalwd, self.destmap, self.state) = result
        if self.collapsef:
            # Collapsing folds everything into one commit, so all sources
            # must share a single destination.
            dests = set(self.destmap.values())
            if len(dests) != 1:
                raise error.InputError(
                    _(b'--collapse does not work with multiple destinations')
                )
            destrev = next(iter(dests))
            destancestors = self.repo.changelog.ancestors(
                [destrev], inclusive=True
            )
            self.external = externalparent(self.repo, self.state, destancestors)

        for destrev in sorted(set(destmap.values())):
            dest = self.repo[destrev]
            if dest.closesbranch() and not self.keepbranchesf:
                self.ui.status(_(b'reopening closed branch head %s\n') % dest)

        # Calculate self.obsolete_* sets
        self._handleskippingobsolete()

        if not self.keepf:
            # Revisions that will actually be rewritten (obsolete ones with a
            # successor elsewhere are skipped, not rewritten).
            rebaseset = set(destmap.keys())
            rebaseset -= set(self.obsolete_with_successor_in_destination)
            rebaseset -= self.obsolete_with_successor_in_rebase_set
            # We have our own divergence-checking in the rebase extension
            overrides = {}
            if obsolete.isenabled(self.repo, obsolete.createmarkersopt):
                overrides = {
                    (b'experimental', b'evolution.allowdivergence'): b'true'
                }
            try:
                with self.ui.configoverride(overrides):
                    rewriteutil.precheck(self.repo, rebaseset, action=b'rebase')
            except error.Abort as e:
                if e.hint is None:
                    e.hint = _(b'use --keep to keep original changesets')
                raise e

        self.prepared = True
462 462
463 463 def _assignworkingcopy(self):
464 464 if self.inmemory:
465 465 from mercurial.context import overlayworkingctx
466 466
467 467 self.wctx = overlayworkingctx(self.repo)
468 468 self.repo.ui.debug(b"rebasing in memory\n")
469 469 else:
470 470 self.wctx = self.repo[None]
471 471 self.repo.ui.debug(b"rebasing on disk\n")
472 472 self.repo.ui.log(
473 473 b"rebase",
474 474 b"using in-memory rebase: %r\n",
475 475 self.inmemory,
476 476 rebase_imm_used=self.inmemory,
477 477 )
478 478
    def _performrebase(self, tr):
        """Rebase every pending revision, honoring topological order.

        ``tr`` is the single enclosing transaction when
        ``rebase.singletransaction`` is set, otherwise None.
        """
        self._assignworkingcopy()
        repo, ui = self.repo, self.ui
        if self.keepbranchesf:
            # insert _savebranch at the start of extrafns so if
            # there's a user-provided extrafn it can clobber branch if
            # desired
            self.extrafns.insert(0, _savebranch)
            if self.collapsef:
                branches = set()
                for rev in self.state:
                    branches.add(repo[rev].branch())
                if len(branches) > 1:
                    raise error.InputError(
                        _(b'cannot collapse multiple named branches')
                    )

        # Keep track of the active bookmarks in order to reset them later
        self.activebookmark = self.activebookmark or repo._activebookmark
        if self.activebookmark:
            bookmarks.deactivate(repo)

        # Store the state before we begin so users can run 'hg rebase --abort'
        # if we fail before the transaction closes.
        self.storestatus()
        if tr:
            # When using single transaction, store state when transaction
            # commits.
            self.storestatus(tr)

        # Progress is tracked only over revisions still marked "to do".
        cands = [k for k, v in pycompat.iteritems(self.state) if v == revtodo]
        p = repo.ui.makeprogress(
            _(b"rebasing"), unit=_(b'changesets'), total=len(cands)
        )

        def progress(ctx):
            # Callback invoked by _rebasenode per processed changeset.
            p.increment(item=(b"%d:%s" % (ctx.rev(), ctx)))

        for subset in sortsource(self.destmap):
            sortedrevs = self.repo.revs(b'sort(%ld, -topo)', subset)
            for rev in sortedrevs:
                self._rebasenode(tr, rev, progress)
        p.complete()
        ui.note(_(b'rebase merging completed\n'))
523 523
    def _concludenode(self, rev, editor, commitmsg=None):
        """Commit the wd changes with parents p1 and p2.

        Reuse commit info from rev but also store useful information in extra.
        When ``commitmsg`` is None the description of ``rev`` is reused.
        Return node of committed revision (may be None if the commit was
        skipped as empty)."""
        repo = self.repo
        ctx = repo[rev]
        if commitmsg is None:
            commitmsg = ctx.description()

        # Skip replacement if collapsing, as that degenerates to p1 for all
        # nodes.
        if not self.collapsef:
            cl = repo.changelog
            # Rewrite hashes of already-rebased revisions referenced in the
            # commit message to their new identities.
            commitmsg = rewriteutil.update_hash_refs(
                repo,
                commitmsg,
                {
                    cl.node(oldrev): [cl.node(newrev)]
                    for oldrev, newrev in self.state.items()
                    if newrev != revtodo
                },
            )

        date = self.date
        if date is None:
            date = ctx.date()
        extra = {b'rebase_source': ctx.hex()}
        for c in self.extrafns:
            c(ctx, extra)
        # Keep the original phase, but never commit below draft.
        destphase = max(ctx.phase(), phases.draft)
        overrides = {
            (b'phases', b'new-commit'): destphase,
            (b'ui', b'allowemptycommit'): not self.skipemptysuccessorf,
        }
        with repo.ui.configoverride(overrides, b'rebase'):
            if self.inmemory:
                newnode = commitmemorynode(
                    repo,
                    wctx=self.wctx,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )
            else:
                newnode = commitnode(
                    repo,
                    extra=extra,
                    commitmsg=commitmsg,
                    editor=editor,
                    user=ctx.user(),
                    date=date,
                )

        return newnode
581 581
    def _rebasenode(self, tr, rev, progressfn):
        """Rebase a single revision, updating self.state accordingly.

        Handles all per-revision cases: already rebased, skipped because of
        obsolescence, pending (revtodo), and the in-memory-conflict fallback
        to an on-disk merge. ``progressfn`` is called once for a revision
        that is actually rebased.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        ctx = repo[rev]
        desc = _ctxdesc(ctx)
        if self.state[rev] == rev:
            ui.status(_(b'already rebased %s\n') % desc)
        elif rev in self.obsolete_with_successor_in_rebase_set:
            msg = (
                _(
                    b'note: not rebasing %s and its descendants as '
                    b'this would cause divergence\n'
                )
                % desc
            )
            repo.ui.status(msg)
            self.skipped.add(rev)
        elif rev in self.obsolete_with_successor_in_destination:
            succ = self.obsolete_with_successor_in_destination[rev]
            if succ is None:
                msg = _(b'note: not rebasing %s, it has no successor\n') % desc
            else:
                succdesc = _ctxdesc(repo[succ])
                msg = _(
                    b'note: not rebasing %s, already in destination as %s\n'
                ) % (desc, succdesc)
            repo.ui.status(msg)
            # Make clearrebased aware state[rev] is not a true successor
            self.skipped.add(rev)
            # Record rev as moved to its desired destination in self.state.
            # This helps bookmark and working parent movement.
            dest = max(
                adjustdest(repo, rev, self.destmap, self.state, self.skipped)
            )
            self.state[rev] = dest
        elif self.state[rev] == revtodo:
            ui.status(_(b'rebasing %s\n') % desc)
            progressfn(ctx)
            p1, p2, base = defineparents(
                repo,
                rev,
                self.destmap,
                self.state,
                self.skipped,
                self.obsolete_with_successor_in_destination,
            )
            if self.resume and self.wctx.p1().rev() == p1:
                # Resuming after a conflict: the merge is already in place.
                repo.ui.debug(b'resuming interrupted rebase\n')
                self.resume = False
            else:
                overrides = {(b'ui', b'forcemerge'): opts.get('tool', b'')}
                with ui.configoverride(overrides, b'rebase'):
                    try:
                        rebasenode(
                            repo,
                            rev,
                            p1,
                            p2,
                            base,
                            self.collapsef,
                            wctx=self.wctx,
                        )
                    except error.InMemoryMergeConflictsError:
                        # In-memory merge cannot resolve conflicts; fall back
                        # to redoing this node on disk (unless dry-running or
                        # collapsing, which are handled above by re-raising).
                        if self.dryrun:
                            raise error.ConflictResolutionRequired(b'rebase')
                        if self.collapsef:
                            # TODO: Make the overlayworkingctx reflected
                            # in the working copy here instead of re-raising
                            # so the entire rebase operation is retried.
                            raise
                        ui.status(
                            _(
                                b"hit merge conflicts; rebasing that "
                                b"commit again in the working copy\n"
                            )
                        )
                        try:
                            cmdutil.bailifchanged(repo)
                        except error.Abort:
                            clearstatus(repo)
                            clearcollapsemsg(repo)
                            raise
                        self.inmemory = False
                        self._assignworkingcopy()
                        mergemod.update(repo[p1], wc=self.wctx)
                        rebasenode(
                            repo,
                            rev,
                            p1,
                            p2,
                            base,
                            self.collapsef,
                            wctx=self.wctx,
                        )
            if not self.collapsef:
                merging = p2 != nullrev
                editform = cmdutil.mergeeditform(merging, b'rebase')
                editor = cmdutil.getcommiteditor(editform=editform, **opts)
                # We need to set parents again here just in case we're continuing
                # a rebase started with an old hg version (before 9c9cfecd4600),
                # because those old versions would have left us with two dirstate
                # parents, and we don't want to create a merge commit here (unless
                # we're rebasing a merge commit).
                self.wctx.setparents(repo[p1].node(), repo[p2].node())
                newnode = self._concludenode(rev, editor)
            else:
                # Skip commit if we are collapsing
                newnode = None
            # Update the state
            if newnode is not None:
                self.state[rev] = repo[newnode].rev()
                ui.debug(b'rebased as %s\n' % short(newnode))
                if repo[newnode].isempty():
                    ui.warn(
                        _(
                            b'note: created empty successor for %s, its '
                            b'destination already has all its changes\n'
                        )
                        % desc
                    )
            else:
                if not self.collapsef:
                    ui.warn(
                        _(
                            b'note: not rebasing %s, its destination already '
                            b'has all its changes\n'
                        )
                        % desc
                    )
                    self.skipped.add(rev)
                self.state[rev] = p1
                ui.debug(b'next revision set to %d\n' % p1)
        else:
            ui.status(
                _(b'already rebased %s as %s\n') % (desc, repo[self.state[rev]])
            )
        if not tr:
            # When not using single transaction, store state after each
            # commit is completely done. On InterventionRequired, we thus
            # won't store the status. Instead, we'll hit the "len(parents) == 2"
            # case and realize that the commit was in progress.
            self.storestatus()
723 723
    def _finishrebase(self):
        """Finalize the rebase: collapse commit, wdir restore, cleanup.

        Creates the single collapsed commit when --collapse was used, moves
        the working directory back, obsoletes/strips the original
        changesets via clearrebased, clears on-disk state files and
        reactivates the previously active bookmark.
        """
        repo, ui, opts = self.repo, self.ui, self.opts
        fm = ui.formatter(b'rebase', pycompat.byteskwargs(opts))
        fm.startitem()
        if self.collapsef:
            p1, p2, _base = defineparents(
                repo,
                min(self.state),
                self.destmap,
                self.state,
                self.skipped,
                self.obsolete_with_successor_in_destination,
            )
            editopt = opts.get('edit')
            editform = b'rebase.collapse'
            if self.collapsemsg:
                commitmsg = self.collapsemsg
            else:
                # No message supplied: synthesize one from the folded
                # revisions and force the editor open.
                commitmsg = b'Collapsed revision'
                for rebased in sorted(self.state):
                    if rebased not in self.skipped:
                        commitmsg += b'\n* %s' % repo[rebased].description()
                editopt = True
            editor = cmdutil.getcommiteditor(edit=editopt, editform=editform)
            revtoreuse = max(self.state)

            self.wctx.setparents(repo[p1].node(), repo[self.external].node())
            newnode = self._concludenode(
                revtoreuse, editor, commitmsg=commitmsg
            )

            if newnode is not None:
                # Map every folded revision onto the single collapsed commit.
                newrev = repo[newnode].rev()
                for oldrev in self.state:
                    self.state[oldrev] = newrev

        if b'qtip' in repo.tags():
            updatemq(repo, self.state, self.skipped, **opts)

        # restore original working directory
        # (we do this before stripping)
        newwd = self.state.get(self.originalwd, self.originalwd)
        if newwd < 0:
            # original directory is a parent of rebase set root or ignored
            newwd = self.originalwd
        if newwd not in [c.rev() for c in repo[None].parents()]:
            ui.note(_(b"update back to initial working directory parent\n"))
            mergemod.update(repo[newwd])

        collapsedas = None
        if self.collapsef and not self.keepf:
            collapsedas = newnode
        clearrebased(
            ui,
            repo,
            self.destmap,
            self.state,
            self.skipped,
            collapsedas,
            self.keepf,
            fm=fm,
            backup=self.backupf,
        )

        clearstatus(repo)
        clearcollapsemsg(repo)

        ui.note(_(b"rebase completed\n"))
        util.unlinkpath(repo.sjoin(b'undo'), ignoremissing=True)
        if self.skipped:
            skippedlen = len(self.skipped)
            ui.note(_(b"%d revisions have been skipped\n") % skippedlen)
        fm.end()

        if (
            self.activebookmark
            and self.activebookmark in repo._bookmarks
            and repo[b'.'].node() == repo._bookmarks[self.activebookmark]
        ):
            bookmarks.activate(repo, self.activebookmark)
804 804
    def _abort(self, backup=True, suppwarns=False, dryrun=False, confirm=False):
        '''Restore the repository to its original state.

        Strips already-created rebased commits when it is safe to do so
        (nothing public, no foreign descendants), moves the working
        directory back, and always clears the on-disk state files.
        Returns 0.
        '''

        repo = self.repo
        try:
            # If the first commits in the rebased set get skipped during the
            # rebase, their values within the state mapping will be the dest
            # rev id. The rebased list must not contain the dest rev
            # (issue4896)
            rebased = [
                s
                for r, s in self.state.items()
                if s >= 0 and s != r and s != self.destmap[r]
            ]
            immutable = [d for d in rebased if not repo[d].mutable()]
            cleanup = True
            if immutable:
                repo.ui.warn(
                    _(b"warning: can't clean up public changesets %s\n")
                    % b', '.join(bytes(repo[r]) for r in immutable),
                    hint=_(b"see 'hg help phases' for details"),
                )
                cleanup = False

            descendants = set()
            if rebased:
                descendants = set(repo.changelog.descendants(rebased))
            if descendants - set(rebased):
                repo.ui.warn(
                    _(
                        b"warning: new changesets detected on "
                        b"destination branch, can't strip\n"
                    )
                )
                cleanup = False

            if cleanup:
                if rebased:
                    strippoints = [
                        c.node() for c in repo.set(b'roots(%ld)', rebased)
                    ]

                updateifonnodes = set(rebased)
                updateifonnodes.update(self.destmap.values())

                if not dryrun and not confirm:
                    updateifonnodes.add(self.originalwd)

                shouldupdate = repo[b'.'].rev() in updateifonnodes

                # Update away from the rebase if necessary
                if shouldupdate:
                    mergemod.clean_update(repo[self.originalwd])

                # Strip from the first rebased revision
                if rebased:
                    repair.strip(repo.ui, repo, strippoints, backup=backup)

            if self.activebookmark and self.activebookmark in repo._bookmarks:
                bookmarks.activate(repo, self.activebookmark)

        finally:
            # State files are cleared even when the cleanup above fails.
            clearstatus(repo)
            clearcollapsemsg(repo)
            if not suppwarns:
                repo.ui.warn(_(b'rebase aborted\n'))
        return 0
872 872
873 873
@command(
    b'rebase',
    # Command-line flag table: (short, long, default, help[, value label]).
    [
        (
            b's',
            b'source',
            [],
            _(b'rebase the specified changesets and their descendants'),
            _(b'REV'),
        ),
        (
            b'b',
            b'base',
            [],
            _(b'rebase everything from branching point of specified changeset'),
            _(b'REV'),
        ),
        (b'r', b'rev', [], _(b'rebase these revisions'), _(b'REV')),
        (
            b'd',
            b'dest',
            b'',
            _(b'rebase onto the specified changeset'),
            _(b'REV'),
        ),
        (b'', b'collapse', False, _(b'collapse the rebased changesets')),
        (
            b'm',
            b'message',
            b'',
            _(b'use text as collapse commit message'),
            _(b'TEXT'),
        ),
        (b'e', b'edit', False, _(b'invoke editor on commit messages')),
        (
            b'l',
            b'logfile',
            b'',
            _(b'read collapse commit message from file'),
            _(b'FILE'),
        ),
        (b'k', b'keep', False, _(b'keep original changesets')),
        (b'', b'keepbranches', False, _(b'keep original branch names')),
        (b'D', b'detach', False, _(b'(DEPRECATED)')),
        (b'i', b'interactive', False, _(b'(DEPRECATED)')),
        (b't', b'tool', b'', _(b'specify merge tool')),
        (b'', b'stop', False, _(b'stop interrupted rebase')),
        (b'c', b'continue', False, _(b'continue an interrupted rebase')),
        (b'a', b'abort', False, _(b'abort an interrupted rebase')),
        (
            b'',
            b'auto-orphans',
            b'',
            _(
                b'automatically rebase orphan revisions '
                b'in the specified revset (EXPERIMENTAL)'
            ),
        ),
    ]
    + cmdutil.dryrunopts
    + cmdutil.formatteropts
    + cmdutil.confirmopts,
    _(b'[[-s REV]... | [-b REV]... | [-r REV]...] [-d REV] [OPTION]...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def rebase(ui, repo, **opts):
    """move changeset (and descendants) to a different branch

    Rebase uses repeated merging to graft changesets from one part of
    history (the source) onto another (the destination). This can be
    useful for linearizing *local* changes relative to a master
    development tree.

    Published commits cannot be rebased (see :hg:`help phases`).
    To copy commits, see :hg:`help graft`.

    If you don't specify a destination changeset (``-d/--dest``), rebase
    will use the same logic as :hg:`merge` to pick a destination.  if
    the current branch contains exactly one other head, the other head
    is merged with by default.  Otherwise, an explicit revision with
    which to merge with must be provided.  (destination changeset is not
    modified by rebasing, but new changesets are added as its
    descendants.)

    Here are the ways to select changesets:

      1. Explicitly select them using ``--rev``.

      2. Use ``--source`` to select a root changeset and include all of its
         descendants.

      3. Use ``--base`` to select a changeset; rebase will find ancestors
         and their descendants which are not also ancestors of the destination.

      4. If you do not specify any of ``--rev``, ``--source``, or ``--base``,
         rebase will use ``--base .`` as above.

    If ``--source`` or ``--rev`` is used, special names ``SRC`` and ``ALLSRC``
    can be used in ``--dest``. Destination would be calculated per source
    revision with ``SRC`` substituted by that single source revision and
    ``ALLSRC`` substituted by all source revisions.

    Rebase will destroy original changesets unless you use ``--keep``.
    It will also move your bookmarks (even if you do).

    Some changesets may be dropped if they do not contribute changes
    (e.g. merges from the destination branch).

    Unlike ``merge``, rebase will do nothing if you are at the branch tip of
    a named branch with two heads. You will need to explicitly specify source
    and/or destination.

    If you need to use a tool to automate merge/conflict decisions, you
    can specify one with ``--tool``, see :hg:`help merge-tools`.
    As a caveat: the tool will not be used to mediate when a file was
    deleted, there is no hook presently available for this.

    If a rebase is interrupted to manually resolve a conflict, it can be
    continued with --continue/-c, aborted with --abort/-a, or stopped with
    --stop.

    .. container:: verbose

      Examples:

      - move "local changes" (current commit back to branching point)
        to the current branch tip after a pull::

          hg rebase

      - move a single changeset to the stable branch::

          hg rebase -r 5f493448 -d stable

      - splice a commit and all its descendants onto another part of history::

          hg rebase --source c0c3 --dest 4cf9

      - rebase everything on a branch marked by a bookmark onto the
        default branch::

          hg rebase --base myfeature --dest default

      - collapse a sequence of changes into a single commit::

          hg rebase --collapse -r 1520:1525 -d .

      - move a named branch while preserving its name::

          hg rebase -r "branch(featureX)" -d 1.3 --keepbranches

      - stabilize orphaned changesets so history looks linear::

          hg rebase -r 'orphan()-obsolete()'\
 -d 'first(max((successors(max(roots(ALLSRC) & ::SRC)^)-obsolete())::) +\
 max(::((roots(ALLSRC) & ::SRC)^)-obsolete()))'

    Configuration Options:

    You can make rebase require a destination if you set the following config
    option::

      [commands]
      rebase.requiredest = True

    By default, rebase will close the transaction after each commit. For
    performance purposes, you can configure rebase to use a single transaction
    across the entire rebase. WARNING: This setting introduces a significant
    risk of losing the work you've done in a rebase if the rebase aborts
    unexpectedly::

      [rebase]
      singletransaction = True

    By default, rebase writes to the working copy, but you can configure it to
    run in-memory for better performance. When the rebase is not moving the
    parent(s) of the working copy (AKA the "currently checked out changesets"),
    this may also allow it to run even if the working copy is dirty::

      [rebase]
      experimental.inmemory = True

    Return Values:

    Returns 0 on success, 1 if nothing to rebase or there are
    unresolved conflicts.

    """
    inmemory = ui.configbool(b'rebase', b'experimental.inmemory')
    # Validate mutually-exclusive flag combinations up front.
    action = cmdutil.check_at_most_one_arg(opts, 'abort', 'stop', 'continue')
    if action:
        cmdutil.check_incompatible_arguments(
            opts, action, ['confirm', 'dry_run']
        )
        cmdutil.check_incompatible_arguments(
            opts, action, ['rev', 'source', 'base', 'dest']
        )
    cmdutil.check_at_most_one_arg(opts, 'confirm', 'dry_run')
    cmdutil.check_at_most_one_arg(opts, 'rev', 'source', 'base')

    if action or repo.currenttransaction() is not None:
        # in-memory rebase is not compatible with resuming rebases.
        # (Or if it is run within a transaction, since the restart logic can
        # fail the entire transaction.)
        inmemory = False

    if opts.get('auto_orphans'):
        disallowed_opts = set(opts) - {'auto_orphans'}
        cmdutil.check_incompatible_arguments(
            opts, 'auto_orphans', disallowed_opts
        )

        # Rewrite --auto-orphans into equivalent --rev/--dest arguments.
        userrevs = list(repo.revs(opts.get('auto_orphans')))
        opts['rev'] = [revsetlang.formatspec(b'%ld and orphan()', userrevs)]
        opts['dest'] = b'_destautoorphanrebase(SRC)'

    if opts.get('dry_run') or opts.get('confirm'):
        return _dryrunrebase(ui, repo, action, opts)
    elif action == 'stop':
        rbsrt = rebaseruntime(repo, ui)
        with repo.wlock(), repo.lock():
            rbsrt.restorestatus()
            if rbsrt.collapsef:
                raise error.StateError(_(b"cannot stop in --collapse session"))
            allowunstable = obsolete.isenabled(repo, obsolete.allowunstableopt)
            if not (rbsrt.keepf or allowunstable):
                raise error.StateError(
                    _(
                        b"cannot remove original changesets with"
                        b" unrebased descendants"
                    ),
                    hint=_(
                        b'either enable obsmarkers to allow unstable '
                        b'revisions or use --keep to keep original '
                        b'changesets'
                    ),
                )
            # update to the current working revision
            # to clear interrupted merge
            mergemod.clean_update(repo[rbsrt.originalwd])
            rbsrt._finishrebase()
            return 0
    elif inmemory:
        try:
            # in-memory merge doesn't support conflicts, so if we hit any, abort
            # and re-run as an on-disk merge.
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                return _dorebase(ui, repo, action, opts, inmemory=inmemory)
        except error.InMemoryMergeConflictsError:
            if ui.configbool(b'devel', b'rebase.force-in-memory-merge'):
                raise
            ui.warn(
                _(
                    b'hit merge conflicts; re-running rebase without in-memory'
                    b' merge\n'
                )
            )
            clearstatus(repo)
            clearcollapsemsg(repo)
            return _dorebase(ui, repo, action, opts, inmemory=False)
    else:
        return _dorebase(ui, repo, action, opts)
1137 1137
1138 1138
def _dryrunrebase(ui, repo, action, opts):
    """Run the rebase in memory for --dry-run / --confirm.

    On --confirm the user is prompted to keep or discard the result;
    otherwise everything is rolled back. ``needsabort`` tracks whether the
    in-memory state must still be aborted in the ``finally`` clause.
    Returns 0 on success, 1 on a merge conflict.
    """
    rbsrt = rebaseruntime(repo, ui, inmemory=True, dryrun=True, opts=opts)
    confirm = opts.get('confirm')
    if confirm:
        ui.status(_(b'starting in-memory rebase\n'))
    else:
        ui.status(
            _(b'starting dry-run rebase; repository will not be changed\n')
        )
    with repo.wlock(), repo.lock():
        needsabort = True
        try:
            overrides = {(b'rebase', b'singletransaction'): True}
            with ui.configoverride(overrides, b'rebase'):
                res = _origrebase(
                    ui,
                    repo,
                    action,
                    opts,
                    rbsrt,
                )
                if res == _nothingtorebase():
                    needsabort = False
                    return res
        except error.ConflictResolutionRequired:
            ui.status(_(b'hit a merge conflict\n'))
            return 1
        except error.Abort:
            # Aborting already cleans up after itself; skip the finally-abort.
            needsabort = False
            raise
        else:
            if confirm:
                ui.status(_(b'rebase completed successfully\n'))
                if not ui.promptchoice(_(b'apply changes (yn)?$$ &Yes $$ &No')):
                    # finish unfinished rebase
                    rbsrt._finishrebase()
                else:
                    rbsrt._prepareabortorcontinue(
                        isabort=True,
                        backup=False,
                        suppwarns=True,
                        confirm=confirm,
                    )
                needsabort = False
            else:
                ui.status(
                    _(
                        b'dry-run rebase completed successfully; run without'
                        b' -n/--dry-run to perform this rebase\n'
                    )
                )
            return 0
        finally:
            if needsabort:
                # no need to store backup in case of dryrun
                rbsrt._prepareabortorcontinue(
                    isabort=True,
                    backup=False,
                    suppwarns=True,
                    dryrun=opts.get('dry_run'),
                )
1200 1200
1201 1201
def _dorebase(ui, repo, action, opts, inmemory=False):
    """Run a real (non-dry-run) rebase with a freshly built runtime."""
    runtime = rebaseruntime(repo, ui, inmemory, opts=opts)
    return _origrebase(ui, repo, action, opts, runtime)
1205 1205
1206 1206
def _origrebase(ui, repo, action, opts, rbsrt):
    """Core rebase driver shared by the real and dry-run entry points.

    Validates options, prepares either a resumed or a brand-new rebase on
    ``rbsrt``, then performs it inside the appropriate transaction /
    dirstate-guard scaffolding. ``action`` is 'abort', 'continue' or None
    ('stop' is handled by the caller).
    """
    assert action != 'stop'
    with repo.wlock(), repo.lock():
        if opts.get('interactive'):
            # Interactive mode is delegated to histedit; tailor the hint to
            # whether that extension is already enabled.
            try:
                if extensions.find(b'histedit'):
                    enablehistedit = b''
            except KeyError:
                enablehistedit = b" --config extensions.histedit="
            help = b"hg%s help -e histedit" % enablehistedit
            msg = (
                _(
                    b"interactive history editing is supported by the "
                    b"'histedit' extension (see \"%s\")"
                )
                % help
            )
            raise error.InputError(msg)

        if rbsrt.collapsemsg and not rbsrt.collapsef:
            raise error.InputError(
                _(b'message can only be specified with collapse')
            )

        if action:
            if rbsrt.collapsef:
                raise error.InputError(
                    _(b'cannot use collapse with continue or abort')
                )
            if action == 'abort' and opts.get('tool', False):
                ui.warn(_(b'tool option will be ignored\n'))
            if action == 'continue':
                ms = mergestatemod.mergestate.read(repo)
                mergeutil.checkunresolved(ms)

            retcode = rbsrt._prepareabortorcontinue(isabort=(action == 'abort'))
            if retcode is not None:
                return retcode
        else:
            # search default destination in this space
            # used in the 'hg pull --rebase' case, see issue 5214.
            destspace = opts.get('_destspace')
            destmap = _definedestmap(
                ui,
                repo,
                rbsrt.inmemory,
                opts.get('dest', None),
                opts.get('source', []),
                opts.get('base', []),
                opts.get('rev', []),
                destspace=destspace,
            )
            retcode = rbsrt._preparenewrebase(destmap)
            if retcode is not None:
                return retcode
            storecollapsemsg(repo, rbsrt.collapsemsg)

        tr = None

        singletr = ui.configbool(b'rebase', b'singletransaction')
        if singletr:
            tr = repo.transaction(b'rebase')

        # If `rebase.singletransaction` is enabled, wrap the entire operation in
        # one transaction here. Otherwise, transactions are obtained when
        # committing each node, which is slower but allows partial success.
        with util.acceptintervention(tr):
            # Same logic for the dirstate guard, except we don't create one when
            # rebasing in-memory (it's not needed).
            dsguard = None
            if singletr and not rbsrt.inmemory:
                dsguard = dirstateguard.dirstateguard(repo, b'rebase')
            with util.acceptintervention(dsguard):
                rbsrt._performrebase(tr)
                if not rbsrt.dryrun:
                    rbsrt._finishrebase()
1283 1283
1284 1284
def _definedestmap(ui, repo, inmemory, destf, srcf, basef, revf, destspace):
    """use revisions argument to define destmap {srcrev: destrev}

    Exactly one of --rev (revf), --source (srcf) or --base (basef) selects
    the set of revisions to rebase; destf is the user-supplied destination
    (may be empty).  Returns the {srcrev: destrev} mapping, or None when
    there is nothing to rebase (callers treat None as a no-op).
    Raises InputError for invalid user input.
    """
    if revf is None:
        revf = []

    # destspace is here to work around issues with `hg pull --rebase` see
    # issue5214 for details

    cmdutil.checkunfinished(repo)
    if not inmemory:
        cmdutil.bailifchanged(repo)

    if ui.configbool(b'commands', b'rebase.requiredest') and not destf:
        raise error.InputError(
            _(b'you must specify a destination'),
            hint=_(b'use: hg rebase -d REV'),
        )

    dest = None

    if revf:
        rebaseset = logcmdutil.revrange(repo, revf)
        if not rebaseset:
            ui.status(_(b'empty "rev" revision set - nothing to rebase\n'))
            return None
    elif srcf:
        src = logcmdutil.revrange(repo, srcf)
        if not src:
            ui.status(_(b'empty "source" revision set - nothing to rebase\n'))
            return None
        # `+ (%ld)` to work around `wdir()::` being empty
        rebaseset = repo.revs(b'(%ld):: + (%ld)', src, src)
    else:
        base = logcmdutil.revrange(repo, basef or [b'.'])
        if not base:
            ui.status(
                _(b'empty "base" revision set - ' b"can't compute rebase set\n")
            )
            return None
        if destf:
            # --base does not support multiple destinations
            # (logcmdutil.revsingle raises InputError for a bad user revset)
            dest = logcmdutil.revsingle(repo, destf)
        else:
            dest = repo[_destrebase(repo, base, destspace=destspace)]
            destf = bytes(dest)

        roots = []  # selected children of branching points
        bpbase = {}  # {branchingpoint: [origbase]}
        for b in base:  # group bases by branching points
            bp = repo.revs(b'ancestor(%d, %d)', b, dest.rev()).first()
            bpbase[bp] = bpbase.get(bp, []) + [b]
        if None in bpbase:
            # emulate the old behavior, showing "nothing to rebase" (a better
            # behavior may be abort with "cannot find branching point" error)
            bpbase.clear()
        for bp, bs in pycompat.iteritems(bpbase):  # calculate roots
            roots += list(repo.revs(b'children(%d) & ancestors(%ld)', bp, bs))

        rebaseset = repo.revs(b'%ld::', roots)

        if not rebaseset:
            # transform to list because smartsets are not comparable to
            # lists. This should be improved to honor laziness of
            # smartset.
            if list(base) == [dest.rev()]:
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - %s is both "base"'
                            b' and destination\n'
                        )
                        % dest
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working directory '
                            b'parent is also destination\n'
                        )
                    )
            elif not repo.revs(b'%ld - ::%d', base, dest.rev()):
                if basef:
                    ui.status(
                        _(
                            b'nothing to rebase - "base" %s is '
                            b'already an ancestor of destination '
                            b'%s\n'
                        )
                        % (b'+'.join(bytes(repo[r]) for r in base), dest)
                    )
                else:
                    ui.status(
                        _(
                            b'nothing to rebase - working '
                            b'directory parent is already an '
                            b'ancestor of destination %s\n'
                        )
                        % dest
                    )
            else:  # can it happen?
                ui.status(
                    _(b'nothing to rebase from %s to %s\n')
                    % (b'+'.join(bytes(repo[r]) for r in base), dest)
                )
            return None

    if wdirrev in rebaseset:
        raise error.InputError(_(b'cannot rebase the working copy'))
    rebasingwcp = repo[b'.'].rev() in rebaseset
    ui.log(
        b"rebase",
        b"rebasing working copy parent: %r\n",
        rebasingwcp,
        rebase_rebasing_wcp=rebasingwcp,
    )
    if inmemory and rebasingwcp:
        # Check these since we did not before.
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

    if not destf:
        dest = repo[_destrebase(repo, rebaseset, destspace=destspace)]
        destf = bytes(dest)

    allsrc = revsetlang.formatspec(b'%ld', rebaseset)
    alias = {b'ALLSRC': allsrc}

    if dest is None:
        try:
            # fast path: try to resolve dest without SRC alias
            dest = scmutil.revsingle(repo, destf, localalias=alias)
        except error.RepoLookupError:
            # multi-dest path: resolve dest for each SRC separately
            destmap = {}
            for r in rebaseset:
                alias[b'SRC'] = revsetlang.formatspec(b'%d', r)
                # use repo.anyrevs instead of scmutil.revsingle because we
                # don't want to abort if destset is empty.
                destset = repo.anyrevs([destf], user=True, localalias=alias)
                size = len(destset)
                if size == 1:
                    destmap[r] = destset.first()
                elif size == 0:
                    ui.note(_(b'skipping %s - empty destination\n') % repo[r])
                else:
                    raise error.InputError(
                        _(b'rebase destination for %s is not unique') % repo[r]
                    )

    if dest is not None:
        # single-dest case: assign dest to each rev in rebaseset
        destrev = dest.rev()
        destmap = {r: destrev for r in rebaseset}  # {srcrev: destrev}

    if not destmap:
        ui.status(_(b'nothing to rebase - empty destination\n'))
        return None

    return destmap
1444 1444
1445 1445
def externalparent(repo, state, destancestors):
    """Pick the second parent to use when collapsing the rebased revisions.

    ``state`` maps the revisions being collapsed; ``destancestors`` are the
    ancestors of the destination.  Returns nullrev when every parent is
    internal, the single external parent when there is exactly one, and
    raises StateError when several external parents exist (the collapsed
    commit could only keep one of them).
    """
    source = min(state)
    external = set()
    for rev in state:
        if rev == source:
            continue
        for parent in repo[rev].parents():
            prev = parent.rev()
            # parents inside the collapsed set or below dest are not external
            if prev not in state and prev not in destancestors:
                external.add(prev)
    if len(external) == 0:
        return nullrev
    if len(external) == 1:
        return external.pop()
    raise error.StateError(
        _(
            b'unable to collapse on top of %d, there is more '
            b'than one external parent: %s'
        )
        % (max(destancestors), b', '.join(b"%d" % p for p in sorted(external)))
    )
1470 1470
1471 1471
def commitmemorynode(repo, wctx, editor, extra, user, date, commitmsg):
    """Commit the in-memory changes held by ``wctx``.

    Returns the node of the committed revision, or None when the commit
    would be empty and empty commits are not allowed by configuration.
    """
    # By convention, ``extra['branch']`` (set by extrafn) clobbers
    # ``branch`` (used when passing ``--keepbranches``).
    branch = extra[b'branch'] if b'branch' in extra else None

    # FIXME: We call _compact() because it's required to correctly detect
    # changed files. This was added to fix a regression shortly before the 5.5
    # release. A proper fix will be done in the default branch.
    wctx._compact()
    memctx = wctx.tomemctx(
        commitmsg,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
    if memctx.isempty():
        if not repo.ui.configbool(b'ui', b'allowemptycommit'):
            return None
    commitres = repo.commitctx(memctx)
    wctx.clean()  # Might be reused
    return commitres
1498 1498
1499 1499
def commitnode(repo, editor, extra, user, date, commitmsg):
    """Commit the working-directory changes with parents p1 and p2.

    Returns the node of the committed revision.
    """
    if repo.ui.configbool(b'rebase', b'singletransaction'):
        # single-transaction mode: no separate dirstate guard is taken here
        dsguard = util.nullcontextmanager()
    else:
        dsguard = dirstateguard.dirstateguard(repo, b'rebase')
    with dsguard:
        # Commit might fail if unresolved files exist
        newnode = repo.commit(
            text=commitmsg, user=user, date=date, extra=extra, editor=editor
        )
        repo.dirstate.setbranch(repo[newnode].branch())
        return newnode
1514 1514
1515 1515
def rebasenode(repo, rev, p1, p2, base, collapse, wctx):
    """Rebase a single revision rev on top of p1 using base as merge ancestor

    ``wctx`` is either the on-disk working context or an in-memory one.
    Raises InMemoryMergeConflictsError / ConflictResolutionRequired when the
    merge leaves unresolved files.
    """
    # Merge phase
    # Update to destination and merge it with local
    p1ctx = repo[p1]
    if wctx.isinmemory():
        wctx.setbase(p1ctx)
    else:
        if repo[b'.'].rev() != p1:
            repo.ui.debug(b" update to %d:%s\n" % (p1, p1ctx))
            mergemod.clean_update(p1ctx)
        else:
            repo.ui.debug(b" already in destination\n")
        # This is, alas, necessary to invalidate workingctx's manifest cache,
        # as well as other data we litter on it in other places.
        wctx = repo[None]
        repo.dirstate.write(repo.currenttransaction())
    ctx = repo[rev]
    repo.ui.debug(b" merge against %d:%s\n" % (rev, ctx))
    if base is not None:
        repo.ui.debug(b" detach base %d:%s\n" % (base, repo[base]))

    # See explanation in merge.graft()
    mergeancestor = repo.changelog.isancestor(p1ctx.node(), ctx.node())
    stats = mergemod._update(
        repo,
        rev,
        branchmerge=True,
        force=True,
        ancestor=base,
        mergeancestor=mergeancestor,
        labels=[b'dest', b'source'],
        wc=wctx,
    )
    # the rebased revision keeps p1/p2 as its recorded parents
    wctx.setparents(p1ctx.node(), repo[p2].node())
    if collapse:
        copies.graftcopies(wctx, ctx, p1ctx)
    else:
        # If we're not using --collapse, we need to
        # duplicate copies between the revision we're
        # rebasing and its first parent.
        copies.graftcopies(wctx, ctx, ctx.p1())

    if stats.unresolvedcount > 0:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError()
        else:
            raise error.ConflictResolutionRequired(b'rebase')
1564 1564
1565 1565
def adjustdest(repo, rev, destmap, state, skipped):
    r"""adjust rebase destination given the current rebase state

    rev is what is being rebased. Return a list of two revs, which are the
    adjusted destinations for rev's p1 and p2, respectively. If a parent is
    nullrev, return dest without adjustment for it.

    For example, when doing rebasing B+E to F, C to G, rebase will first move B
    to B1, and E's destination will be adjusted from F to B1.

        B1 <- written during rebasing B
        |
        F <- original destination of B, E
        |
        | E <- rev, which is being rebased
        | |
        | D <- prev, one parent of rev being checked
        | |
        | x <- skipped, ex. no successor or successor in (::dest)
        | |
        | C <- rebased as C', different destination
        | |
        | B <- rebased as B1     C'
        |/                       |
        A                        G <- destination of C, different

    Another example about merge changeset, rebase -r C+G+H -d K, rebase will
    first move C to C1, G to G1, and when it's checking H, the adjusted
    destinations will be [C1, G1].

        H       C1 G1
       /|       | /
      F G       |/
    K | |  ->   K
    | C D       |
    | |/        |
    | B         | ...
    |/          |/
    A           A

    Besides, adjust dest according to existing rebase information. For example,

      B C D    B needs to be rebased on top of C, C needs to be rebased on top
       \|/     of D. We will rebase C first.

      A

      C'       After rebasing C, when considering B's destination, use C'
      |        instead of the original C.
      B D
       \ /
        A
    """
    # pick already rebased revs with same dest from state as interesting source
    dest = destmap[rev]
    source = [
        s
        for s, d in state.items()
        if d > 0 and destmap[s] == dest and s not in skipped
    ]

    result = []
    # one adjusted destination per parent of rev
    for prev in repo.changelog.parentrevs(rev):
        adjusted = dest
        if prev != nullrev:
            # newest already-rebased rev that descends from this parent
            candidate = repo.revs(b'max(%ld and (::%d))', source, prev).first()
            if candidate is not None:
                adjusted = state[candidate]
        if adjusted == dest and dest in state:
            adjusted = state[dest]
            if adjusted == revtodo:
                # sortsource should produce an order that makes this impossible
                raise error.ProgrammingError(
                    b'rev %d should be rebased already at this time' % dest
                )
        result.append(adjusted)
    return result
1642 1642
1643 1643
def _checkobsrebase(repo, ui, rebaseobsrevs, rebaseobsskipped):
    """Abort when obsolescence markers make this rebase create divergence.

    `rebaseobsrevs`: set of obsolete revision in source
    `rebaseobsskipped`: set of revisions from source skipped because they have
    successors in destination or no non-obsolete successor.
    """
    allow_divergence = obsolete.isenabled(repo, obsolete.allowdivergenceopt)
    # Obsolete node with successors not in dest leads to divergence
    candidates = rebaseobsrevs - rebaseobsskipped
    if not candidates or allow_divergence:
        return
    divhashes = (bytes(repo[r]) for r in candidates)
    msg = _(b"this rebase will cause divergences from: %s")
    hint = _(
        b"to force the rebase please set "
        b"experimental.evolution.allowdivergence=True"
    )
    raise error.StateError(msg % (b",".join(divhashes),), hint=hint)
1664 1664
1665 1665
def successorrevs(unfi, rev):
    """Yield revision numbers for successors of rev."""
    # caller must hand in the unfiltered repo; successors may be filtered
    assert unfi.filtername is None
    node_to_rev = unfi.changelog.index.get_rev
    succnodes = obsutil.allsuccessors(unfi.obsstore, [unfi[rev].node()])
    for succnode in succnodes:
        succrev = node_to_rev(succnode)
        if succrev is not None:
            yield succrev
1674 1674
1675 1675
def defineparents(repo, rev, destmap, state, skipped, obsskipped):
    """Return new parents and optionally a merge base for rev being rebased

    Returns a ``(p1, p2, base)`` tuple of revision numbers; ``base`` is the
    merge ancestor to use in ``rebasenode``.

    The destination specified by "dest" cannot always be used directly because
    previously rebase result could affect destination. For example,

          D E    rebase -r C+D+E -d B
          |/     C will be rebased to C'
        B C      D's new destination will be C' instead of B
        |/       E's new destination will be C' instead of B
        A

    The new parents of a merge is slightly more complicated. See the comment
    block below.
    """
    # use unfiltered changelog since successorrevs may return filtered nodes
    assert repo.filtername is None
    cl = repo.changelog
    isancestor = cl.isancestorrev

    dest = destmap[rev]
    oldps = repo.changelog.parentrevs(rev)  # old parents
    newps = [nullrev, nullrev]  # new parents
    dests = adjustdest(repo, rev, destmap, state, skipped)
    bases = list(oldps)  # merge base candidates, initially just old parents

    if all(r == nullrev for r in oldps[1:]):
        # For non-merge changeset, just move p to adjusted dest as requested.
        newps[0] = dests[0]
    else:
        # For merge changeset, if we move p to dests[i] unconditionally, both
        # parents may change and the end result looks like "the merge loses a
        # parent", which is a surprise. This is a limit because "--dest" only
        # accepts one dest per src.
        #
        # Therefore, only move p with reasonable conditions (in this order):
        #   1. use dest, if dest is a descendent of (p or one of p's successors)
        #   2. use p's rebased result, if p is rebased (state[p] > 0)
        #
        # Comparing with adjustdest, the logic here does some additional work:
        # 1. decide which parents will not be moved towards dest
        # 2. if the above decision is "no", should a parent still be moved
        #    because it was rebased?
        #
        # For example:
        #
        #     C    # "rebase -r C -d D" is an error since none of the parents
        #    /|    # can be moved. "rebase -r B+C -d D" will move C's parent
        #   A B D  # B (using rule "2."), since B will be rebased.
        #
        # The loop tries to be not rely on the fact that a Mercurial node has
        # at most 2 parents.
        for i, p in enumerate(oldps):
            np = p  # new parent
            if any(isancestor(x, dests[i]) for x in successorrevs(repo, p)):
                np = dests[i]
            elif p in state and state[p] > 0:
                np = state[p]

            # If one parent becomes an ancestor of the other, drop the ancestor
            for j, x in enumerate(newps[:i]):
                if x == nullrev:
                    continue
                if isancestor(np, x):  # CASE-1
                    np = nullrev
                elif isancestor(x, np):  # CASE-2
                    newps[j] = np
                    np = nullrev
                    # New parents forming an ancestor relationship does not
                    # mean the old parents have a similar relationship. Do not
                    # set bases[x] to nullrev.
                    bases[j], bases[i] = bases[i], bases[j]

            newps[i] = np

        # "rebasenode" updates to new p1, and the old p1 will be used as merge
        # base. If only p2 changes, merging using unchanged p1 as merge base is
        # suboptimal. Therefore swap parents to make the merge sane.
        if newps[1] != nullrev and oldps[0] == newps[0]:
            assert len(newps) == 2 and len(oldps) == 2
            newps.reverse()
            bases.reverse()

        # No parent change might be an error because we fail to make rev a
        # descendent of requested dest. This can happen, for example:
        #
        #     C    # rebase -r C -d D
        #    /|    # None of A and B will be changed to D and rebase fails.
        #   A B D
        if set(newps) == set(oldps) and dest not in newps:
            raise error.InputError(
                _(
                    b'cannot rebase %d:%s without '
                    b'moving at least one of its parents'
                )
                % (rev, repo[rev])
            )

    # Source should not be ancestor of dest. The check here guarantees it's
    # impossible. With multi-dest, the initial check does not cover complex
    # cases since we don't have abstractions to dry-run rebase cheaply.
    if any(p != nullrev and isancestor(rev, p) for p in newps):
        raise error.InputError(_(b'source is ancestor of destination'))

    # Check if the merge will contain unwanted changes. That may happen if
    # there are multiple special (non-changelog ancestor) merge bases, which
    # cannot be handled well by the 3-way merge algorithm. For example:
    #
    #     F
    #    /|
    #   D E  # "rebase -r D+E+F -d Z", when rebasing F, if "D" was chosen
    #   | |  # as merge base, the difference between D and F will include
    #   B C  # C, so the rebased F will contain C surprisingly. If "E" was
    #   |/   #  chosen, the rebased F will contain B.
    #   A Z
    #
    # But our merge base candidates (D and E in above case) could still be
    # better than the default (ancestor(F, Z) == null). Therefore still
    # pick one (so choose p1 above).
    if sum(1 for b in set(bases) if b != nullrev and b not in newps) > 1:
        unwanted = [None, None]  # unwanted[i]: unwanted revs if choose bases[i]
        for i, base in enumerate(bases):
            if base == nullrev or base in newps:
                continue
            # Revisions in the side (not chosen as merge base) branch that
            # might contain "surprising" contents
            other_bases = set(bases) - {base}
            siderevs = list(
                repo.revs(b'(%ld %% (%d+%d))', other_bases, base, dest)
            )

            # If those revisions are covered by rebaseset, the result is good.
            # A merge in rebaseset would be considered to cover its ancestors.
            if siderevs:
                rebaseset = [
                    r for r, d in state.items() if d > 0 and r not in obsskipped
                ]
                merges = [
                    r for r in rebaseset if cl.parentrevs(r)[1] != nullrev
                ]
                unwanted[i] = list(
                    repo.revs(
                        b'%ld - (::%ld) - %ld', siderevs, merges, rebaseset
                    )
                )

        if any(revs is not None for revs in unwanted):
            # Choose a merge base that has a minimal number of unwanted revs.
            l, i = min(
                (len(revs), i)
                for i, revs in enumerate(unwanted)
                if revs is not None
            )

            # The merge will include unwanted revisions. Abort now. Revisit this if
            # we have a more advanced merge algorithm that handles multiple bases.
            if l > 0:
                unwanteddesc = _(b' or ').join(
                    (
                        b', '.join(b'%d:%s' % (r, repo[r]) for r in revs)
                        for revs in unwanted
                        if revs is not None
                    )
                )
                raise error.InputError(
                    _(b'rebasing %d:%s will include unwanted changes from %s')
                    % (rev, repo[rev], unwanteddesc)
                )

            # newps[0] should match merge base if possible. Currently, if newps[i]
            # is nullrev, the only case is newps[i] and newps[j] (j < i), one is
            # the other's ancestor. In that case, it's fine to not swap newps here.
            # (see CASE-1 and CASE-2 above)
            if i != 0:
                if newps[i] != nullrev:
                    newps[0], newps[i] = newps[i], newps[0]
                bases[0], bases[i] = bases[i], bases[0]

    # "rebasenode" updates to new p1, use the corresponding merge base.
    base = bases[0]

    repo.ui.debug(b" future parents are %d and %d\n" % tuple(newps))

    return newps[0], newps[1], base
1860 1860
1861 1861
def isagitpatch(repo, patchname):
    """Return true if the given patch is in git format

    Scans the on-disk mq patch file for a ``diff --git`` header line.
    """
    mqpatch = os.path.join(repo.mq.path, patchname)
    # use a context manager so the file is closed even on early return
    # (the original leaked the file object returned by open())
    with open(mqpatch, b'rb') as fp:
        for line in patch.linereader(fp):
            if line.startswith(b'diff --git'):
                return True
    return False
1869 1869
1870 1870
def updatemq(repo, state, skipped, **opts):
    """Update rebased mq patches - finalize and then import them

    ``state`` maps old revs to their rebased revs; patches whose revision was
    rebased are finalized and re-imported at the new revision, while skipped
    ones are dropped from the series.
    """
    mqrebase = {}  # {oldrev: (patchname, isgit)}
    mq = repo.mq
    original_series = mq.fullseries[:]
    skippedpatches = set()

    for p in mq.applied:
        rev = repo[p.node].rev()
        if rev in state:
            repo.ui.debug(
                b'revision %d is an mq patch (%s), finalize it.\n'
                % (rev, p.name)
            )
            mqrebase[rev] = (p.name, isagitpatch(repo, p.name))
        else:
            # Applied but not rebased, not sure this should happen
            skippedpatches.add(p.name)

    if mqrebase:
        mq.finish(repo, mqrebase.keys())

        # We must start import from the newest revision
        for rev in sorted(mqrebase, reverse=True):
            if rev not in skipped:
                name, isgit = mqrebase[rev]
                repo.ui.note(
                    _(b'updating mq patch %s to %d:%s\n')
                    % (name, state[rev], repo[state[rev]])
                )
                mq.qimport(
                    repo,
                    (),
                    patchname=name,
                    git=isgit,
                    rev=[b"%d" % state[rev]],
                )
            else:
                # Rebased and skipped
                skippedpatches.add(mqrebase[rev][0])

        # Patches were either applied and rebased and imported in
        # order, applied and removed or unapplied. Discard the removed
        # ones while preserving the original series order and guards.
        newseries = [
            s
            for s in original_series
            if mq.guard_re.split(s, 1)[0] not in skippedpatches
        ]
        mq.fullseries[:] = newseries
        mq.seriesdirty = True
        mq.savedirty()
1923 1923
1924 1924
def storecollapsemsg(repo, collapsemsg):
    """Store the collapse message to allow recovery

    Writes ``collapsemsg`` (or an empty message) to ``.hg/last-message.txt``.
    """
    collapsemsg = collapsemsg or b''
    f = repo.vfs(b"last-message.txt", b"w")
    try:
        f.write(b"%s\n" % collapsemsg)
    finally:
        # close even if the write fails, so the handle is not leaked
        f.close()
1931 1931
1932 1932
def clearcollapsemsg(repo):
    """Delete the stored collapse message file, ignoring a missing file."""
    repo.vfs.unlinkpath(b"last-message.txt", ignoremissing=True)
1936 1936
1937 1937
def restorecollapsemsg(repo, isabort):
    """Restore previously stored collapse message

    Reads ``.hg/last-message.txt``.  When the file is missing: return an
    empty message on abort, otherwise raise Abort.
    """
    try:
        f = repo.vfs(b"last-message.txt")
        try:
            collapsemsg = f.readline().strip()
        finally:
            # close even when readline fails, so the handle is not leaked
            f.close()
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise
        if isabort:
            # Oh well, just abort like normal
            collapsemsg = b''
        else:
            raise error.Abort(_(b'missing .hg/last-message.txt for rebase'))
    return collapsemsg
1953 1953
1954 1954
def clearstatus(repo):
    """Remove the rebase status files."""
    # Make sure the active transaction won't write the state file
    active_tr = repo.currenttransaction()
    if active_tr:
        active_tr.removefilegenerator(b'rebasestate')
    repo.vfs.unlinkpath(b"rebasestate", ignoremissing=True)
1962 1962
1963 1963
def sortsource(destmap):
    """Yield batches of source revisions in a safe rebase order.

    ``destmap`` maps each source revision to its destination.  When sources
    and destinations overlap, a revision must not be rebased before the
    revision it will land on top of.  Each yielded sorted list contains the
    revisions whose destination is already outside the still-pending set,
    so every revision is rebased exactly once.

    For example, when rebasing A to B and B to C, this yields [B] then [A],
    indicating B needs to be rebased first.

    Raises InputError when the mapping contains a cycle, which makes the
    rebase impossible.
    """
    pending = set(destmap)
    while pending:
        # ready once the destination is no longer among the pending sources
        batch = [r for r in sorted(pending) if destmap[r] not in pending]
        if not batch:
            raise error.InputError(_(b'source and destination form a cycle'))
        pending.difference_update(batch)
        yield batch
1988 1988
1989 1989
def buildstate(repo, destmap, collapse):
    """Define which revisions are going to be rebased and where

    repo: repo
    destmap: {srcrev: destrev}

    Returns ``(originalwd, destmap, state)`` where ``state`` maps each source
    rev to ``revtodo`` (or to itself when already in place), or None when the
    whole rebase would be a no-op.  Raises InputError/StateError on invalid
    requests.
    """
    rebaseset = destmap.keys()
    originalwd = repo[b'.'].rev()

    # This check isn't strictly necessary, since mq detects commits over an
    # applied patch. But it prevents messing up the working directory when
    # a partially completed rebase is blocked by mq.
    if b'qtip' in repo.tags():
        mqapplied = {repo[s.node].rev() for s in repo.mq.applied}
        if set(destmap.values()) & mqapplied:
            raise error.StateError(_(b'cannot rebase onto an applied mq patch'))

    # Get "cycle" error early by exhausting the generator.
    sortedsrc = list(sortsource(destmap))  # a list of sorted revs
    if not sortedsrc:
        raise error.InputError(_(b'no matching revisions'))

    # Only check the first batch of revisions to rebase not depending on other
    # rebaseset. This means "source is ancestor of destination" for the second
    # (and following) batches of revisions are not checked here. We rely on
    # "defineparents" to do that check.
    roots = list(repo.set(b'roots(%ld)', sortedsrc[0]))
    if not roots:
        raise error.InputError(_(b'no matching revisions'))

    def revof(r):
        return r.rev()

    roots = sorted(roots, key=revof)
    state = dict.fromkeys(rebaseset, revtodo)
    emptyrebase = len(sortedsrc) == 1
    for root in roots:
        dest = repo[destmap[root.rev()]]
        commonbase = root.ancestor(dest)
        if commonbase == root:
            raise error.InputError(_(b'source is ancestor of destination'))
        if commonbase == dest:
            wctx = repo[None]
            if dest == wctx.p1():
                # when rebasing to '.', it will use the current wd branch name
                samebranch = root.branch() == wctx.branch()
            else:
                samebranch = root.branch() == dest.branch()
            if not collapse and samebranch and dest in root.parents():
                # mark the revision as done by setting its new revision
                # equal to its old (current) revisions
                state[root.rev()] = root.rev()
                repo.ui.debug(b'source is a child of destination\n')
                continue

        emptyrebase = False
        repo.ui.debug(b'rebase onto %s starting from %s\n' % (dest, root))
    if emptyrebase:
        return None
    for rev in sorted(state):
        parents = [p for p in repo.changelog.parentrevs(rev) if p != nullrev]
        # if all parents of this revision are done, then so is this revision
        if parents and all((state.get(p) == p for p in parents)):
            state[rev] = rev
    return originalwd, destmap, state
2055 2055
2056 2056
def clearrebased(
    ui,
    repo,
    destmap,
    state,
    skipped,
    collapsedas=None,
    keepf=False,
    fm=None,
    backup=True,
):
    """dispose of rebased revision at the end of the rebase

    If `collapsedas` is not None, the rebase was a collapse whose result is the
    `collapsedas` node.

    If `keepf` is True, the rebase has --keep set and no nodes should be
    removed (but bookmarks still need to be moved).

    If `backup` is False, no backup will be stored when stripping rebased
    revisions.
    """
    tonode = repo.changelog.node
    replacements = {}  # {(oldnode,...): (successor nodes)}
    moves = {}  # {oldnode: newnode}, for bookmark movement
    stripcleanup = not obsolete.isenabled(repo, obsolete.createmarkersopt)

    collapsednodes = []
    for rev, newrev in sorted(state.items()):
        if newrev >= 0 and newrev != rev:
            oldnode = tonode(rev)
            newnode = collapsedas or tonode(newrev)
            moves[oldnode] = newnode
            succs = None
            if rev in skipped:
                if stripcleanup or not repo[rev].obsolete():
                    succs = ()
            elif collapsedas:
                collapsednodes.append(oldnode)
            else:
                succs = (newnode,)
            if succs is not None:
                replacements[(oldnode,)] = succs
    if collapsednodes:
        # record all collapsed nodes as replaced by the single collapse result
        replacements[tuple(collapsednodes)] = (collapsedas,)
    if fm:
        hf = fm.hexfunc
        fl = fm.formatlist
        fd = fm.formatdict
        changes = {}
        for oldns, newn in pycompat.iteritems(replacements):
            for oldn in oldns:
                changes[hf(oldn)] = fl([hf(n) for n in newn], name=b'node')
        nodechanges = fd(changes, key=b"oldnode", value=b"newnodes")
        fm.data(nodechanges=nodechanges)
    if keepf:
        # --keep: nothing gets obsoleted or stripped
        replacements = {}
    scmutil.cleanupnodes(repo, replacements, b'rebase', moves, backup=backup)
2115 2115
2116 2116
def pullrebase(orig, ui, repo, *args, **opts):
    """Call rebase after pull if the latter has been invoked with --rebase

    This wraps the pull command (``orig``); after a pull that brought in new
    revisions, it runs rebase with the pulled revisions as the destination
    space, falling back to a plain update when there is no rebase destination.
    """
    if opts.get('rebase'):
        if ui.configbool(b'commands', b'rebase.requiredest'):
            msg = _(b'rebase destination required by configuration')
            hint = _(b'use hg pull followed by hg rebase -d DEST')
            raise error.InputError(msg, hint=hint)

        with repo.wlock(), repo.lock():
            if opts.get('update'):
                del opts['update']
                ui.debug(
                    b'--update and --rebase are not compatible, ignoring '
                    b'the update flag\n'
                )

            cmdutil.checkunfinished(repo, skipmerge=True)
            cmdutil.bailifchanged(
                repo,
                hint=_(
                    b'cannot pull with rebase: '
                    b'please commit or shelve your changes first'
                ),
            )

            revsprepull = len(repo)
            origpostincoming = commands.postincoming

            def _dummy(*args, **kwargs):
                pass

            # suppress the post-pull update; rebase handles positioning itself
            commands.postincoming = _dummy
            try:
                ret = orig(ui, repo, *args, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                # --rev option from pull conflict with rebase own --rev
                # dropping it
                if 'rev' in opts:
                    del opts['rev']
                # positional argument from pull conflicts with rebase's own
                # --source.
                if 'source' in opts:
                    del opts['source']
                # revsprepull is the len of the repo, not revnum of tip.
                destspace = list(repo.changelog.revs(start=revsprepull))
                opts['_destspace'] = destspace
                try:
                    rebase(ui, repo, **opts)
                except error.NoMergeDestAbort:
                    # we can maybe update instead
                    rev, _a, _b = destutil.destupdate(repo)
                    if rev == repo[b'.'].rev():
                        ui.status(_(b'nothing to rebase\n'))
                    else:
                        ui.status(_(b'nothing to rebase - updating instead\n'))
                        # not passing argument to get the bare update behavior
                        # with warning and trumpets
                        commands.update(ui, repo)
    else:
        if opts.get('tool'):
            raise error.InputError(_(b'--tool can only be used with --rebase'))
        ret = orig(ui, repo, *args, **opts)

    return ret
2184 2184
2185 2185
def _compute_obsolete_sets(repo, rebaseobsrevs, destmap):
    """Figure out what to do about about obsolete revisions

    `obsolete_with_successor_in_destination` is a mapping mapping obsolete => successor for all
    obsolete nodes to be rebased given in `rebaseobsrevs`.

    `obsolete_with_successor_in_rebase_set` is a set with obsolete revisions,
    without a successor in destination, that would cause divergence.
    """
    obsolete_with_successor_in_destination = {}
    obsolete_with_successor_in_rebase_set = set()

    cl = repo.changelog
    get_rev = cl.index.get_rev
    extinctrevs = set(repo.revs(b'extinct()'))
    for srcrev in rebaseobsrevs:
        srcnode = cl.node(srcrev)
        # XXX: more advanced APIs are required to handle split correctly
        successors = set(obsutil.allsuccessors(repo.obsstore, [srcnode]))
        # obsutil.allsuccessors includes node itself
        successors.remove(srcnode)
        # get_rev() returns None for successors unknown locally; drop those.
        succrevs = {get_rev(s) for s in successors}
        succrevs.discard(None)
        if not successors or succrevs.issubset(extinctrevs):
            # no successor, or all successors are extinct
            obsolete_with_successor_in_destination[srcrev] = None
        else:
            dstrev = destmap[srcrev]
            # Look for any successor already an ancestor of the destination:
            # rebasing srcrev would then be redundant.
            for succrev in succrevs:
                if cl.isancestorrev(succrev, dstrev):
                    obsolete_with_successor_in_destination[srcrev] = succrev
                    break
            else:
                # If 'srcrev' has a successor in rebase set but none in
                # destination (which would be catched above), we shall skip it
                # and its descendants to avoid divergence.
                if srcrev in extinctrevs or any(s in destmap for s in succrevs):
                    obsolete_with_successor_in_rebase_set.add(srcrev)

    return (
        obsolete_with_successor_in_destination,
        obsolete_with_successor_in_rebase_set,
    )
2229 2229
2230 2230
def abortrebase(ui, repo):
    """Abort an in-progress rebase, restoring the pre-rebase state."""
    # Hold both the working-copy and store locks while unwinding.
    with repo.wlock():
        with repo.lock():
            runtime = rebaseruntime(repo, ui)
            runtime._prepareabortorcontinue(isabort=True)
2235 2235
2236 2236
def continuerebase(ui, repo):
    """Resume an interrupted rebase once conflicts have been resolved."""
    with repo.wlock():
        with repo.lock():
            runtime = rebaseruntime(repo, ui)
            # Refuse to continue while merge conflicts remain unresolved.
            mergeutil.checkunresolved(mergestatemod.mergestate.read(repo))
            status = runtime._prepareabortorcontinue(isabort=False)
            if status is not None:
                return status
            runtime._performrebase(None)
            runtime._finishrebase()
2247 2247
2248 2248
def summaryhook(ui, repo):
    """Contribute a rebase progress line to ``hg summary`` output."""
    if not repo.vfs.exists(b'rebasestate'):
        return
    try:
        runtime = rebaseruntime(repo, ui, {})
        runtime.restorestatus()
        state = runtime.state
    except error.RepoLookupError:
        # i18n: column positioning for "hg summary"
        ui.write(_(b'rebase: (use "hg rebase --abort" to clear broken state)\n'))
        return
    # Entries with a revision >= 0 have already been rebased.
    done = sum(1 for v in pycompat.itervalues(state) if v >= 0)
    donepart = ui.label(_(b'%d rebased'), b'rebase.rebased') % done
    pendingpart = ui.label(_(b'%d remaining'), b'rebase.remaining') % (
        len(state) - done
    )
    # i18n: column positioning for "hg summary"
    ui.write(_(b'rebase: %s, %s (rebase --continue)\n') % (donepart, pendingpart))
2271 2271
2272 2272
def uisetup(ui):
    """Extension setup: hook ``pull --rebase`` and register rebase state."""
    # Replace pull with a decorator to provide --rebase option
    entry = extensions.wrapcommand(commands.table, b'pull', pullrebase)
    extraopts = [
        (b'', b'rebase', None, _(b"rebase working directory to branch head")),
        (b't', b'tool', b'', _(b"specify merge tool for rebase")),
    ]
    entry[1].extend(extraopts)
    cmdutil.summaryhooks.add(b'rebase', summaryhook)
    statemod.addunfinished(
        b'rebase',
        fname=b'rebasestate',
        stopflag=True,
        continueflag=True,
        abortfunc=abortrebase,
        continuefunc=continuerebase,
    )
@@ -1,3946 +1,3946 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullrev,
19 19 short,
20 20 )
21 21 from .pycompat import (
22 22 getattr,
23 23 open,
24 24 setattr,
25 25 )
26 26 from .thirdparty import attr
27 27
28 28 from . import (
29 29 bookmarks,
30 30 changelog,
31 31 copies,
32 32 crecord as crecordmod,
33 33 dirstateguard,
34 34 encoding,
35 35 error,
36 36 formatter,
37 37 logcmdutil,
38 38 match as matchmod,
39 39 merge as mergemod,
40 40 mergestate as mergestatemod,
41 41 mergeutil,
42 42 obsolete,
43 43 patch,
44 44 pathutil,
45 45 phases,
46 46 pycompat,
47 47 repair,
48 48 revlog,
49 49 rewriteutil,
50 50 scmutil,
51 51 state as statemod,
52 52 subrepoutil,
53 53 templatekw,
54 54 templater,
55 55 util,
56 56 vfs as vfsmod,
57 57 )
58 58
59 59 from .utils import (
60 60 dateutil,
61 61 stringutil,
62 62 )
63 63
64 64 from .revlogutils import (
65 65 constants as revlog_constants,
66 66 )
67 67
68 68 if pycompat.TYPE_CHECKING:
69 69 from typing import (
70 70 Any,
71 71 Dict,
72 72 )
73 73
74 74 for t in (Any, Dict):
75 75 assert t
76 76
77 77 stringio = util.stringio
78 78
79 79 # templates of common command options
80 80
81 81 dryrunopts = [
82 82 (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
83 83 ]
84 84
85 85 confirmopts = [
86 86 (b'', b'confirm', None, _(b'ask before applying actions')),
87 87 ]
88 88
89 89 remoteopts = [
90 90 (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
91 91 (
92 92 b'',
93 93 b'remotecmd',
94 94 b'',
95 95 _(b'specify hg command to run on the remote side'),
96 96 _(b'CMD'),
97 97 ),
98 98 (
99 99 b'',
100 100 b'insecure',
101 101 None,
102 102 _(b'do not verify server certificate (ignoring web.cacerts config)'),
103 103 ),
104 104 ]
105 105
106 106 walkopts = [
107 107 (
108 108 b'I',
109 109 b'include',
110 110 [],
111 111 _(b'include names matching the given patterns'),
112 112 _(b'PATTERN'),
113 113 ),
114 114 (
115 115 b'X',
116 116 b'exclude',
117 117 [],
118 118 _(b'exclude names matching the given patterns'),
119 119 _(b'PATTERN'),
120 120 ),
121 121 ]
122 122
123 123 commitopts = [
124 124 (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
125 125 (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
126 126 ]
127 127
128 128 commitopts2 = [
129 129 (
130 130 b'd',
131 131 b'date',
132 132 b'',
133 133 _(b'record the specified date as commit date'),
134 134 _(b'DATE'),
135 135 ),
136 136 (
137 137 b'u',
138 138 b'user',
139 139 b'',
140 140 _(b'record the specified user as committer'),
141 141 _(b'USER'),
142 142 ),
143 143 ]
144 144
145 145 commitopts3 = [
146 146 (b'D', b'currentdate', None, _(b'record the current date as commit date')),
147 147 (b'U', b'currentuser', None, _(b'record the current user as committer')),
148 148 ]
149 149
150 150 formatteropts = [
151 151 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
152 152 ]
153 153
154 154 templateopts = [
155 155 (
156 156 b'',
157 157 b'style',
158 158 b'',
159 159 _(b'display using template map file (DEPRECATED)'),
160 160 _(b'STYLE'),
161 161 ),
162 162 (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
163 163 ]
164 164
165 165 logopts = [
166 166 (b'p', b'patch', None, _(b'show patch')),
167 167 (b'g', b'git', None, _(b'use git extended diff format')),
168 168 (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
169 169 (b'M', b'no-merges', None, _(b'do not show merges')),
170 170 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
171 171 (b'G', b'graph', None, _(b"show the revision DAG")),
172 172 ] + templateopts
173 173
174 174 diffopts = [
175 175 (b'a', b'text', None, _(b'treat all files as text')),
176 176 (
177 177 b'g',
178 178 b'git',
179 179 None,
180 180 _(b'use git extended diff format (DEFAULT: diff.git)'),
181 181 ),
182 182 (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
183 183 (b'', b'nodates', None, _(b'omit dates from diff headers')),
184 184 ]
185 185
186 186 diffwsopts = [
187 187 (
188 188 b'w',
189 189 b'ignore-all-space',
190 190 None,
191 191 _(b'ignore white space when comparing lines'),
192 192 ),
193 193 (
194 194 b'b',
195 195 b'ignore-space-change',
196 196 None,
197 197 _(b'ignore changes in the amount of white space'),
198 198 ),
199 199 (
200 200 b'B',
201 201 b'ignore-blank-lines',
202 202 None,
203 203 _(b'ignore changes whose lines are all blank'),
204 204 ),
205 205 (
206 206 b'Z',
207 207 b'ignore-space-at-eol',
208 208 None,
209 209 _(b'ignore changes in whitespace at EOL'),
210 210 ),
211 211 ]
212 212
213 213 diffopts2 = (
214 214 [
215 215 (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
216 216 (
217 217 b'p',
218 218 b'show-function',
219 219 None,
220 220 _(
221 221 b'show which function each change is in (DEFAULT: diff.showfunc)'
222 222 ),
223 223 ),
224 224 (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
225 225 ]
226 226 + diffwsopts
227 227 + [
228 228 (
229 229 b'U',
230 230 b'unified',
231 231 b'',
232 232 _(b'number of lines of context to show'),
233 233 _(b'NUM'),
234 234 ),
235 235 (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
236 236 (
237 237 b'',
238 238 b'root',
239 239 b'',
240 240 _(b'produce diffs relative to subdirectory'),
241 241 _(b'DIR'),
242 242 ),
243 243 ]
244 244 )
245 245
246 246 mergetoolopts = [
247 247 (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
248 248 ]
249 249
250 250 similarityopts = [
251 251 (
252 252 b's',
253 253 b'similarity',
254 254 b'',
255 255 _(b'guess renamed files by similarity (0<=s<=100)'),
256 256 _(b'SIMILARITY'),
257 257 )
258 258 ]
259 259
260 260 subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]
261 261
262 262 debugrevlogopts = [
263 263 (b'c', b'changelog', False, _(b'open changelog')),
264 264 (b'm', b'manifest', False, _(b'open manifest')),
265 265 (b'', b'dir', b'', _(b'open directory manifest')),
266 266 ]
267 267
268 268 # special string such that everything below this line will be ignored in the
269 269 # editor text
270 270 _linebelow = b"^HG: ------------------------ >8 ------------------------$"
271 271
272 272
def check_at_most_one_arg(opts, *args):
    """abort if more than one of the arguments are in opts

    Returns the unique argument or None if none of them were specified.
    """

    def to_display(name):
        # Option names use '_' internally but '-' on the command line.
        return pycompat.sysbytes(name).replace(b'_', b'-')

    found = None
    for name in args:
        if not opts.get(name):
            continue
        if found is not None:
            raise error.InputError(
                _(b'cannot specify both --%s and --%s')
                % (to_display(found), to_display(name))
            )
        found = name
    return found
292 292
293 293
def check_incompatible_arguments(opts, first, others):
    """abort if the first argument is given along with any of the others

    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
    among themselves, and they're passed as a single collection.
    """
    # Check ``first`` pairwise against each other option.
    for incompatible in others:
        check_at_most_one_arg(opts, first, incompatible)
302 302
303 303
def resolve_commit_options(ui, opts):
    """modify commit options dict to handle related options

    The return value indicates that ``rewrite.update-timestamp`` is the reason
    the ``date`` option is set.
    """
    check_at_most_one_arg(opts, 'date', 'currentdate')
    check_at_most_one_arg(opts, 'user', 'currentuser')

    datemaydiffer = False  # date-only change should be ignored?

    if opts.get('currentdate'):
        opts['date'] = b'%d %d' % dateutil.makedate()
    elif (
        not opts.get('date')
        # The config-driven timestamp refresh only applies when the user
        # neither passed --date nor explicitly negated --currentdate
        # (``is None`` distinguishes "unspecified" from an explicit False).
        and ui.configbool(b'rewrite', b'update-timestamp')
        and opts.get('currentdate') is None
    ):
        opts['date'] = b'%d %d' % dateutil.makedate()
        datemaydiffer = True

    if opts.get('currentuser'):
        opts['user'] = ui.username()

    return datemaydiffer
329 329
330 330
def check_note_size(opts):
    """make sure note is of valid format"""
    note = opts.get('note')
    if note:
        # Notes are stored in metadata limited to 255 bytes and one line.
        if len(note) > 255:
            raise error.InputError(
                _(b"cannot store a note of more than 255 bytes")
            )
        if b'\n' in note:
            raise error.InputError(_(b"note cannot contain a newline"))
342 342
343 343
def ishunk(x):
    """Return True if ``x`` is a (curses or plain) patch hunk object."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
347 347
348 348
def isheader(x):
    """Return True if ``x`` is a (curses or plain) patch header object."""
    return isinstance(x, (crecordmod.uiheader, patch.header))
352 352
353 353
def newandmodified(chunks):
    """Return (new-file names, extra paths to restore) from header chunks.

    Scans ``chunks`` for headers that introduce new files; the second set
    collects the other paths those headers touch (e.g. rename sources)
    which must be restored for the patch to apply.
    """
    newfiles = set()
    restore = set()
    for header in (c for c in chunks if isheader(c) and c.isnewfile()):
        name = header.filename()
        newfiles.add(name)
        restore.update(set(header.files()) - {name})
    return newfiles, restore
362 362
363 363
def parsealiases(cmd):
    """Expand a ``name|alias1|alias2`` command spec into a list of aliases.

    For every alias containing '-', a dash-free variant is appended as
    well, unless that spelling is already present.
    """
    aliases = cmd.split(b"|")
    seen = set(aliases)
    folded = []
    for alias in aliases:
        if b'-' not in alias:
            continue
        squashed = alias.replace(b'-', b'')
        if squashed not in seen:
            seen.add(squashed)
            folded.append(squashed)
    return aliases + folded
376 376
377 377
def setupwrapcolorwrite(ui):
    """Monkey-patch ``ui.write`` so diff output gets color labels.

    Returns the original ``ui.write`` so the caller can restore it
    (see recordfilter()).
    """
    # wrap ui.write so diff output can be labeled/colorized
    def wrapwrite(orig, *args, **kw):
        label = kw.pop('label', b'')
        # patch.difflabel classifies each chunk (added/removed/context...)
        # and yields the matching color label suffix.
        for chunk, l in patch.difflabel(lambda: args):
            orig(chunk, label=label + l)

    oldwrite = ui.write

    def wrap(*args, **kwargs):
        return wrapwrite(oldwrite, *args, **kwargs)

    setattr(ui, 'write', wrap)
    return oldwrite
392 392
393 393
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Let the user select hunks via curses when enabled, else text mode.

    Falls back to the text-mode selector when curses is disabled or the
    curses UI reports a fallback error.
    """
    try:
        if usecurses:
            if testfile:
                # Test mode: drive the chunk selector from a script file.
                chooser = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                chooser = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
    except crecordmod.fallbackerror as e:
        ui.warn(b'%s\n' % e)
        ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
412 412
413 413
def recordfilter(ui, originalhunks, match, operation=None):
    """Interactively narrow ``originalhunks`` down to the selected hunks.

    *operation* names what the user is doing (reverting, committing,
    shelving, ...) so prompts can be worded accordingly (see
    patch.filterpatch).  Returns (selected hunks, extra options).
    """
    usecurses = crecordmod.checkcurses(ui)
    testfile = ui.config(b'experimental', b'crecordtest')
    # Temporarily wrap ui.write so diff output gets colorized labels.
    oldwrite = setupwrapcolorwrite(ui)
    try:
        newchunks, newopts = filterchunks(
            ui, originalhunks, usecurses, testfile, match, operation
        )
    finally:
        # Always restore the original writer, even if selection failed.
        ui.write = oldwrite
    return newchunks, newopts
431 431
432 432
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Drive an interactive record/commit session.

    ``commitfunc`` performs the actual commit once the working directory
    has been narrowed to the selected hunks; ``filterfn`` (typically
    recordfilter) asks the user which hunks to keep; ``cmdsuggest`` is the
    command name to suggest when the terminal is non-interactive;
    ``backupall`` forces backing up every changed file rather than only
    the ones being partially committed.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.InputError(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.InputError(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.InputError(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        # Git-style diffs with function context and no dates give the
        # hunk selector the richest, most stable input.
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        original_headers = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, original_headers, match)
        except error.PatchError as err:
            raise error.InputError(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(chunks)
        contenders = set()
        for h in chunks:
            if isheader(h):
                contenders.update(set(h.files()))

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # A leftover backup dir from a previous run is fine.
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=os.path.basename(f) + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # Serialize only the selected chunks of backed-up files.
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.InputError(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate.get_entry(realname).maybe_clean:
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified

                        # XXX-PENDINGCHANGE: We should clarify the context in
                        # which this function is called to make sure it
                        # already called within a `pendingchange`, However we
                        # are taking a shortcut here in order to be able to
                        # quickly deprecated the older API.
                        with dirstate.parentchange():
                            dirstate.update_file(
                                realname,
                                p1_tracked=True,
                                wc_tracked=True,
                                possibly_dirty=True,
                            )

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # Best-effort cleanup: never mask the real outcome of the
                # record operation with a restore failure.
                pass

    def recordinwlock(ui, repo, message, match, opts):
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
664 664
665 665
class dirnode(object):
    """
    Represent a directory in user working copy with information required for
    the purpose of tersing its status.

    path is the path to the directory, without a trailing '/'

    statuses is a set of statuses of all files in this directory (this includes
    all the files in all the subdirectories too)

    files is a list of files which are direct child of this directory

    subdirs is a dictionary of sub-directory name as the key and it's own
    dirnode object as the value
    """

    def __init__(self, dirpath):
        self.path = dirpath  # bytes path of this directory, no trailing '/'
        self.statuses = set()  # status chars seen anywhere under this dir
        self.files = []  # (filename, statuschar) pairs of direct children
        self.subdirs = {}  # subdir name -> dirnode

    def _addfileindir(self, filename, status):
        """Add a file in this directory as a direct child."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file to this directory or to its direct parent directory.

        If the file is not direct child of this directory, we traverse to the
        directory of which this file is a direct child of and add the file
        there.
        """

        # the filename contains a path separator, it means it's not the direct
        # child of this directory
        if b'/' in filename:
            subdir, filep = filename.split(b'/', 1)

            # does the dirnode object for subdir exists
            if subdir not in self.subdirs:
                subdirpath = pathutil.join(self.path, subdir)
                self.subdirs[subdir] = dirnode(subdirpath)

            # try adding the file in subdir
            self.subdirs[subdir].addfile(filep, status)

        else:
            self._addfileindir(filename, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for f, st in self.files:
            yield st, pathutil.join(self.path, f)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) obtained by processing the status of this
        dirnode.

        terseargs is the string of arguments passed by the user with `--terse`
        flag.

        Following are the cases which can happen:

        1) All the files in the directory (including all the files in its
        subdirectories) share the same status and the user has asked us to terse
        that status. -> yield (status, dirpath). dirpath will end in '/'.

        2) Otherwise, we do following:

                a) Yield (status, filepath) for all the files which are in this
                    directory (only the ones in this directory, not the subdirs)

                b) Recurse the function on all the subdirectories of this
                   directory
        """

        if len(self.statuses) == 1:
            # NOTE(review): pop() mutates self.statuses, so walking the same
            # tree twice would misbehave — confirm callers walk only once.
            onlyst = self.statuses.pop()

            # Making sure we terse only when the status abbreviation is
            # passed as terse argument
            if onlyst in terseargs:
                yield onlyst, self.path + b'/'
                return

        # add the files to status list
        for st, fpath in self.iterfilepaths():
            yield st, fpath

        # recurse on the subdirs
        for dirobj in self.subdirs.values():
            for st, fpath in dirobj.tersewalk(terseargs):
                yield st, fpath
765 765
766 766
def tersedir(statuslist, terseargs):
    """Collapse per-file statuses into per-directory entries where possible.

    statuslist is a scmutil.status() object with one file list per status;
    terseargs is the user-supplied `--terse` argument string.  A dirnode
    tree is built and queried for directories whose contents all share a
    status the user asked to terse.
    """
    # Output order of the status lists (drives scmutil.status(*...) below).
    statusorder = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # Reject any --terse letter we do not recognize.
    for ch in pycompat.bytestr(terseargs):
        if ch not in statusorder:
            raise error.InputError(_(b"'%s' not recognized") % ch)

    # Root of the dirnode tree covering the whole repository.
    root = dirnode(b'')
    attrnames = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    # Populate the tree; each status is keyed by its first letter.
    tersedict = {}
    for attrname in attrnames:
        statuschar = attrname[0:1]
        tersedict[statuschar] = []
        for f in getattr(statuslist, attrname):
            root.addfile(f, statuschar)

    # Files directly at the repo root are never tersed.
    for st, fpath in root.iterfilepaths():
        tersedict[st].append(fpath)

    # Recurse into each top-level directory, tersing where allowed.
    for subdir in root.subdirs.values():
        for st, fpath in subdir.tersewalk(terseargs):
            tersedict[st].append(fpath)

    tersedlist = []
    for st in statusorder:
        tersedict[st].sort()
        tersedlist.append(tersedict[st])
    return scmutil.status(*tersedlist)
822 822
823 823
824 824 def _commentlines(raw):
825 825 '''Surround lineswith a comment char and a new line'''
826 826 lines = raw.splitlines()
827 827 commentedlines = [b'# %s' % line for line in lines]
828 828 return b'\n'.join(commentedlines) + b'\n'
829 829
830 830
@attr.s(frozen=True)
class morestatus(object):
    """Extra ``hg status`` output for unfinished operations and merges."""

    reporoot = attr.ib()  # repo root path, used to relativize file paths
    unfinishedop = attr.ib()  # name of the unfinished operation, or None
    unfinishedmsg = attr.ib()  # hint message for that operation, or None
    activemerge = attr.ib()  # whether a merge is in progress
    unresolvedpaths = attr.ib()  # sorted unresolved paths, or None
    # NOTE(review): attrs evaluates ``default=set()`` once at class
    # definition, so this set is shared across instances — harmless only
    # if at most one morestatus is used per process; confirm.
    _formattedpaths = attr.ib(init=False, default=set())
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        # Remember which paths the main status listing already emitted so
        # _formatconflicts() does not emit them twice.
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        # Emit the trailing "unfinished state" summary, if any.
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg), label=self._label
                )

    def _formatconflicts(self, fm):
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b'    %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    b'''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
908 908
909 909
def readmorestatus(repo):
    """Return a morestatus object, or None if there is no unfinished state."""
    state = statemod.getrepostate(repo)
    ms = mergestatemod.mergestate.read(repo)
    merging = ms.active()
    if not (state or merging):
        return None

    op = msg = unresolved = None
    if state:
        op, msg = state
    if merging:
        unresolved = sorted(ms.unresolved())
    return morestatus(repo.root, op, msg, merging, unresolved)
926 926
927 927
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    matches = {}
    debugmatches = {}

    # an exact key match short-circuits, so "log" alias beats "log|history"
    entries = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in entries:
        names = parsealiases(entry)
        allcmds.extend(names)
        hit = None
        if cmd in names:
            hit = cmd
        elif not strict:
            # accept any alias that cmd is a prefix of
            hit = next((n for n in names if n.startswith(cmd)), None)
        if hit is None:
            continue
        if names[0].startswith(b"debug") or hit.startswith(b"debug"):
            debugmatches[hit] = (names, table[entry])
        else:
            matches[hit] = (names, table[entry])

    # debug commands are only offered when nothing else matched
    if not matches and debugmatches:
        matches = debugmatches

    return matches, allcmds
965 965
966 966
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    candidates, allcmds = findpossible(cmd, table, strict)

    # exact match wins outright
    try:
        return candidates[cmd]
    except KeyError:
        pass

    if len(candidates) > 1:
        raise error.AmbiguousCommand(cmd, sorted(candidates))

    # a single prefix match is unambiguous
    for entry in candidates.values():
        return entry

    raise error.UnknownCommand(cmd, allcmds)
982 982
983 983
def changebranch(ui, repo, revs, label, opts):
    """Change the branch name of given revs to label

    Rewrites each changeset in ``revs`` with the new branch name, recording
    the predecessor node in a ``branch_change`` extra.  Raises InputError
    for an empty or non-linear revision set, for an already-existing branch
    name (unless ``--force``), or when the revisions are not at the top of
    their stack.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = logcmdutil.revrange(repo, revs)
        if not revs:
            raise error.InputError(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.InputError(
                _(b"cannot change branch of non-linear revisions")
            )
        # refuse public/obsoleted revisions etc. before rewriting anything
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the root's parents; reusing one of those is always ok
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.InputError(
                _(b"a branch of the same name already exists")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.InputError(
                _(b"cannot change branch in middle of a stack")
            )

        # old node -> (new node,) for every rewritten changeset
        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # closes over ctx; only invoked by commitctx() below, within
            # this same loop iteration
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

        ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1085 1085
1086 1086
def findrepo(p):
    """Return the nearest ancestor of ``p`` (including ``p`` itself) that
    contains a ``.hg`` directory, or None if no repository root is found."""
    while True:
        if os.path.isdir(os.path.join(p, b".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # reached the filesystem root without finding a repo
            return None
        p = parent
1094 1094
1095 1095
def bailifchanged(repo, merge=True, hint=None):
    """enforce the precondition that working directory must be clean.

    'merge' can be set to false if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to Abort exception.
    """

    if merge and repo.dirstate.p2() != repo.nullid:
        raise error.StateError(_(b'outstanding uncommitted merge'), hint=hint)

    status = repo.status()
    if any([status.modified, status.added, status.removed, status.deleted]):
        raise error.StateError(_(b'uncommitted changes'), hint=hint)

    # also insist that every subrepo is clean
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged(hint=hint)
1113 1113
1114 1114
def logmessage(ui, opts):
    """get the log message according to -m and -l option"""

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    # -m given, or nothing to read: pass through
    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        # normalize line endings of the file content
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1135 1135
1136 1136
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    This returns baseformname with '.merge' appended if it is a merge,
    otherwise '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (b".merge" if ismerge else b".normal")
1153 1153
1154 1154
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """Return the commit-message editor matching the '--edit' option.

    'finishdesc' is called with the edited commit message (= 'description'
    of the new changeset) just after editing, but before the empty-ness
    check; its return value is the text actually stored into history,
    which allows changing the description before storing.

    'extramsg' replaces the 'Leave message empty to abort commit' line
    shown in the editor; the 'HG: ' prefix and EOL are added automatically.

    'editform' is a dot-separated list of names, to distinguish the purpose
    of commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever one of
    'finishdesc' or 'extramsg' is specified, because they are specific for
    usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forcededitor(repo, ctx, subs):
            return commitforceeditor(
                repo,
                ctx,
                subs,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forcededitor
    if editform:
        return lambda repo, ctx, subs: commiteditor(
            repo, ctx, subs, editform=editform
        )
    return commiteditor
1185 1185
1186 1186
def _escapecommandtemplate(tmpl):
    """Backslash-escape the literal (non-template) pieces of ``tmpl``."""
    pieces = []
    for kind, start, end in templater.scantemplate(tmpl, raw=True):
        chunk = tmpl[start:end]
        if kind == b'string':
            chunk = stringutil.escapestr(chunk)
        pieces.append(chunk)
    return b''.join(pieces)
1195 1195
1196 1196
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in outermost string is not taken as an escape character because it
    is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    escaped = _escapecommandtemplate(tmpl)
    templ = formatter.maketemplater(ui, escaped)
    return templ.renderdefault(props)
1214 1214
1215 1215
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1231 1231
1232 1232
def format_changeset_summary(ui, ctx, command=None, default_spec=None):
    """Format a changeset summary (one line)."""
    # precedence: per-command config, generic config, caller default,
    # built-in template
    spec = None
    if command:
        spec = ui.config(
            b'command-templates', b'oneline-summary.%s' % command, None
        )
    spec = (
        spec
        or ui.config(b'command-templates', b'oneline-summary')
        or default_spec
        or (
            b'{separate(" ", '
            b'label("oneline-summary.changeset", "{rev}:{node|short}")'
            b', '
            b'join(filter(namespaces % "{ifeq(namespace, "branches", "", join(names % "{label("oneline-summary.{namespace}", name)}", " "))}"), " ")'
            b')} '
            b'"{label("oneline-summary.desc", desc|firstline)}"'
        )
    )
    text = rendertemplate(ctx, spec)
    return text.split(b'\n')[0]
1255 1255
1256 1256
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # static %-escape -> template fragment mapping
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    # escapes below are only valid when the caller supplied the context
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both, zero-pad the sequence number to the width of the total
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            # template syntax ({...}) passes through untouched
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more %-escapes: escape the remaining literal text
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # a lone trailing '%' has no spec character
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1326 1326
1327 1327
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format filename pattern against ``ctx``."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    return rendertemplate(
        ctx, _buildfntemplate(pat, **props), pycompat.byteskwargs(props)
    )
1336 1336
1337 1337
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == b'-'
1341 1341
1342 1342
1343 1343 class _unclosablefile(object):
1344 1344 def __init__(self, fp):
1345 1345 self._fp = fp
1346 1346
1347 1347 def close(self):
1348 1348 pass
1349 1349
1350 1350 def __iter__(self):
1351 1351 return iter(self._fp)
1352 1352
1353 1353 def __getattr__(self, attr):
1354 1354 return getattr(self._fp, attr)
1355 1355
1356 1356 def __enter__(self):
1357 1357 return self
1358 1358
1359 1359 def __exit__(self, exc_type, exc_value, exc_tb):
1360 1360 pass
1361 1361
1362 1362
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the file named by pattern ``pat`` (or wrap a ui stream)."""
    writable = mode not in (b'r', b'rb')

    if isstdiofilename(pat):
        # '-' or empty means the ui's standard stream; callers must not be
        # able to close it, hence the wrapper
        ui = ctx.repo().ui
        stream = ui.fout if writable else ui.fin
        return _unclosablefile(stream)
    return open(makefilename(ctx, pat, **props), mode)
1375 1375
1376 1376
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    ``opts`` must carry the b'changelog', b'manifest' and b'dir' keys;
    exactly one of those flags, a filename, or (with ``returnrevlog``)
    a raw revlog path selects what is opened.  Raises InputError on
    conflicting or insufficient options.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    # validate the option combination before touching any storage
    msg = None
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.InputError(msg)

    r = None
    if repo:
        if cl:
            # unfiltered so hidden changesets are still reachable
            r = repo.unfiltered().changelog
        elif dir:
            if not scmutil.istreemanifest(repo):
                raise error.InputError(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            # unwrap the storage object down to its backing revlog
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.InputError(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.InputError(_(b'cannot give path to non-revlog'))

        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.InputError(_(b"revlog '%s' not found") % file_)

        # fall back to opening the named file as a free-standing revlog
        target = (revlog_constants.KIND_OTHER, b'free-form:%s' % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False),
            target=target,
            radix=file_[:-2],
        )
    return r
1450 1450
1451 1451
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    ``opts`` must provide the b'changelog', b'manifest' and b'dir' keys
    consumed by ``openstorage()``.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.
    """
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1463 1463
1464 1464
1465 1465 def copy(ui, repo, pats, opts, rename=False):
1466 1466 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1467 1467
1468 1468 # called with the repo lock held
1469 1469 #
1470 1470 # hgsep => pathname that uses "/" to separate directories
1471 1471 # ossep => pathname that uses os.sep to separate directories
1472 1472 cwd = repo.getcwd()
1473 1473 targets = {}
1474 1474 forget = opts.get(b"forget")
1475 1475 after = opts.get(b"after")
1476 1476 dryrun = opts.get(b"dry_run")
1477 1477 rev = opts.get(b'at_rev')
1478 1478 if rev:
1479 1479 if not forget and not after:
1480 1480 # TODO: Remove this restriction and make it also create the copy
1481 1481 # targets (and remove the rename source if rename==True).
1482 1482 raise error.InputError(_(b'--at-rev requires --after'))
1483 ctx = scmutil.revsingle(repo, rev)
1483 ctx = logcmdutil.revsingle(repo, rev)
1484 1484 if len(ctx.parents()) > 1:
1485 1485 raise error.InputError(
1486 1486 _(b'cannot mark/unmark copy in merge commit')
1487 1487 )
1488 1488 else:
1489 1489 ctx = repo[None]
1490 1490
1491 1491 pctx = ctx.p1()
1492 1492
1493 1493 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1494 1494
1495 1495 if forget:
1496 1496 if ctx.rev() is None:
1497 1497 new_ctx = ctx
1498 1498 else:
1499 1499 if len(ctx.parents()) > 1:
1500 1500 raise error.InputError(_(b'cannot unmark copy in merge commit'))
1501 1501 # avoid cycle context -> subrepo -> cmdutil
1502 1502 from . import context
1503 1503
1504 1504 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1505 1505 new_ctx = context.overlayworkingctx(repo)
1506 1506 new_ctx.setbase(ctx.p1())
1507 1507 mergemod.graft(repo, ctx, wctx=new_ctx)
1508 1508
1509 1509 match = scmutil.match(ctx, pats, opts)
1510 1510
1511 1511 current_copies = ctx.p1copies()
1512 1512 current_copies.update(ctx.p2copies())
1513 1513
1514 1514 uipathfn = scmutil.getuipathfn(repo)
1515 1515 for f in ctx.walk(match):
1516 1516 if f in current_copies:
1517 1517 new_ctx[f].markcopied(None)
1518 1518 elif match.exact(f):
1519 1519 ui.warn(
1520 1520 _(
1521 1521 b'%s: not unmarking as copy - file is not marked as copied\n'
1522 1522 )
1523 1523 % uipathfn(f)
1524 1524 )
1525 1525
1526 1526 if ctx.rev() is not None:
1527 1527 with repo.lock():
1528 1528 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1529 1529 new_node = mem_ctx.commit()
1530 1530
1531 1531 if repo.dirstate.p1() == ctx.node():
1532 1532 with repo.dirstate.parentchange():
1533 1533 scmutil.movedirstate(repo, repo[new_node])
1534 1534 replacements = {ctx.node(): [new_node]}
1535 1535 scmutil.cleanupnodes(
1536 1536 repo, replacements, b'uncopy', fixphase=True
1537 1537 )
1538 1538
1539 1539 return
1540 1540
1541 1541 pats = scmutil.expandpats(pats)
1542 1542 if not pats:
1543 1543 raise error.InputError(_(b'no source or destination specified'))
1544 1544 if len(pats) == 1:
1545 1545 raise error.InputError(_(b'no destination specified'))
1546 1546 dest = pats.pop()
1547 1547
1548 1548 def walkpat(pat):
1549 1549 srcs = []
1550 1550 # TODO: Inline and simplify the non-working-copy version of this code
1551 1551 # since it shares very little with the working-copy version of it.
1552 1552 ctx_to_walk = ctx if ctx.rev() is None else pctx
1553 1553 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1554 1554 for abs in ctx_to_walk.walk(m):
1555 1555 rel = uipathfn(abs)
1556 1556 exact = m.exact(abs)
1557 1557 if abs not in ctx:
1558 1558 if abs in pctx:
1559 1559 if not after:
1560 1560 if exact:
1561 1561 ui.warn(
1562 1562 _(
1563 1563 b'%s: not copying - file has been marked '
1564 1564 b'for remove\n'
1565 1565 )
1566 1566 % rel
1567 1567 )
1568 1568 continue
1569 1569 else:
1570 1570 if exact:
1571 1571 ui.warn(
1572 1572 _(b'%s: not copying - file is not managed\n') % rel
1573 1573 )
1574 1574 continue
1575 1575
1576 1576 # abs: hgsep
1577 1577 # rel: ossep
1578 1578 srcs.append((abs, rel, exact))
1579 1579 return srcs
1580 1580
1581 1581 if ctx.rev() is not None:
1582 1582 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1583 1583 absdest = pathutil.canonpath(repo.root, cwd, dest)
1584 1584 if ctx.hasdir(absdest):
1585 1585 raise error.InputError(
1586 1586 _(b'%s: --at-rev does not support a directory as destination')
1587 1587 % uipathfn(absdest)
1588 1588 )
1589 1589 if absdest not in ctx:
1590 1590 raise error.InputError(
1591 1591 _(b'%s: copy destination does not exist in %s')
1592 1592 % (uipathfn(absdest), ctx)
1593 1593 )
1594 1594
1595 1595 # avoid cycle context -> subrepo -> cmdutil
1596 1596 from . import context
1597 1597
1598 1598 copylist = []
1599 1599 for pat in pats:
1600 1600 srcs = walkpat(pat)
1601 1601 if not srcs:
1602 1602 continue
1603 1603 for abs, rel, exact in srcs:
1604 1604 copylist.append(abs)
1605 1605
1606 1606 if not copylist:
1607 1607 raise error.InputError(_(b'no files to copy'))
1608 1608 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1609 1609 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1610 1610 # existing functions below.
1611 1611 if len(copylist) != 1:
1612 1612 raise error.InputError(_(b'--at-rev requires a single source'))
1613 1613
1614 1614 new_ctx = context.overlayworkingctx(repo)
1615 1615 new_ctx.setbase(ctx.p1())
1616 1616 mergemod.graft(repo, ctx, wctx=new_ctx)
1617 1617
1618 1618 new_ctx.markcopied(absdest, copylist[0])
1619 1619
1620 1620 with repo.lock():
1621 1621 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1622 1622 new_node = mem_ctx.commit()
1623 1623
1624 1624 if repo.dirstate.p1() == ctx.node():
1625 1625 with repo.dirstate.parentchange():
1626 1626 scmutil.movedirstate(repo, repo[new_node])
1627 1627 replacements = {ctx.node(): [new_node]}
1628 1628 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1629 1629
1630 1630 return
1631 1631
1632 1632 # abssrc: hgsep
1633 1633 # relsrc: ossep
1634 1634 # otarget: ossep
1635 1635 def copyfile(abssrc, relsrc, otarget, exact):
1636 1636 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1637 1637 if b'/' in abstarget:
1638 1638 # We cannot normalize abstarget itself, this would prevent
1639 1639 # case only renames, like a => A.
1640 1640 abspath, absname = abstarget.rsplit(b'/', 1)
1641 1641 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1642 1642 reltarget = repo.pathto(abstarget, cwd)
1643 1643 target = repo.wjoin(abstarget)
1644 1644 src = repo.wjoin(abssrc)
1645 1645 entry = repo.dirstate.get_entry(abstarget)
1646 1646
1647 1647 already_commited = entry.tracked and not entry.added
1648 1648
1649 1649 scmutil.checkportable(ui, abstarget)
1650 1650
1651 1651 # check for collisions
1652 1652 prevsrc = targets.get(abstarget)
1653 1653 if prevsrc is not None:
1654 1654 ui.warn(
1655 1655 _(b'%s: not overwriting - %s collides with %s\n')
1656 1656 % (
1657 1657 reltarget,
1658 1658 repo.pathto(abssrc, cwd),
1659 1659 repo.pathto(prevsrc, cwd),
1660 1660 )
1661 1661 )
1662 1662 return True # report a failure
1663 1663
1664 1664 # check for overwrites
1665 1665 exists = os.path.lexists(target)
1666 1666 samefile = False
1667 1667 if exists and abssrc != abstarget:
1668 1668 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1669 1669 abstarget
1670 1670 ):
1671 1671 if not rename:
1672 1672 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1673 1673 return True # report a failure
1674 1674 exists = False
1675 1675 samefile = True
1676 1676
1677 1677 if not after and exists or after and already_commited:
1678 1678 if not opts[b'force']:
1679 1679 if already_commited:
1680 1680 msg = _(b'%s: not overwriting - file already committed\n')
1681 1681 if after:
1682 1682 flags = b'--after --force'
1683 1683 else:
1684 1684 flags = b'--force'
1685 1685 if rename:
1686 1686 hint = (
1687 1687 _(
1688 1688 b"('hg rename %s' to replace the file by "
1689 1689 b'recording a rename)\n'
1690 1690 )
1691 1691 % flags
1692 1692 )
1693 1693 else:
1694 1694 hint = (
1695 1695 _(
1696 1696 b"('hg copy %s' to replace the file by "
1697 1697 b'recording a copy)\n'
1698 1698 )
1699 1699 % flags
1700 1700 )
1701 1701 else:
1702 1702 msg = _(b'%s: not overwriting - file exists\n')
1703 1703 if rename:
1704 1704 hint = _(
1705 1705 b"('hg rename --after' to record the rename)\n"
1706 1706 )
1707 1707 else:
1708 1708 hint = _(b"('hg copy --after' to record the copy)\n")
1709 1709 ui.warn(msg % reltarget)
1710 1710 ui.warn(hint)
1711 1711 return True # report a failure
1712 1712
1713 1713 if after:
1714 1714 if not exists:
1715 1715 if rename:
1716 1716 ui.warn(
1717 1717 _(b'%s: not recording move - %s does not exist\n')
1718 1718 % (relsrc, reltarget)
1719 1719 )
1720 1720 else:
1721 1721 ui.warn(
1722 1722 _(b'%s: not recording copy - %s does not exist\n')
1723 1723 % (relsrc, reltarget)
1724 1724 )
1725 1725 return True # report a failure
1726 1726 elif not dryrun:
1727 1727 try:
1728 1728 if exists:
1729 1729 os.unlink(target)
1730 1730 targetdir = os.path.dirname(target) or b'.'
1731 1731 if not os.path.isdir(targetdir):
1732 1732 os.makedirs(targetdir)
1733 1733 if samefile:
1734 1734 tmp = target + b"~hgrename"
1735 1735 os.rename(src, tmp)
1736 1736 os.rename(tmp, target)
1737 1737 else:
1738 1738 # Preserve stat info on renames, not on copies; this matches
1739 1739 # Linux CLI behavior.
1740 1740 util.copyfile(src, target, copystat=rename)
1741 1741 srcexists = True
1742 1742 except IOError as inst:
1743 1743 if inst.errno == errno.ENOENT:
1744 1744 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1745 1745 srcexists = False
1746 1746 else:
1747 1747 ui.warn(
1748 1748 _(b'%s: cannot copy - %s\n')
1749 1749 % (relsrc, encoding.strtolocal(inst.strerror))
1750 1750 )
1751 1751 return True # report a failure
1752 1752
1753 1753 if ui.verbose or not exact:
1754 1754 if rename:
1755 1755 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1756 1756 else:
1757 1757 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1758 1758
1759 1759 targets[abstarget] = abssrc
1760 1760
1761 1761 # fix up dirstate
1762 1762 scmutil.dirstatecopy(
1763 1763 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1764 1764 )
1765 1765 if rename and not dryrun:
1766 1766 if not after and srcexists and not samefile:
1767 1767 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1768 1768 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1769 1769 ctx.forget([abssrc])
1770 1770
1771 1771 # pat: ossep
1772 1772 # dest ossep
1773 1773 # srcs: list of (hgsep, hgsep, ossep, bool)
1774 1774 # return: function that takes hgsep and returns ossep
1775 1775 def targetpathfn(pat, dest, srcs):
1776 1776 if os.path.isdir(pat):
1777 1777 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1778 1778 abspfx = util.localpath(abspfx)
1779 1779 if destdirexists:
1780 1780 striplen = len(os.path.split(abspfx)[0])
1781 1781 else:
1782 1782 striplen = len(abspfx)
1783 1783 if striplen:
1784 1784 striplen += len(pycompat.ossep)
1785 1785 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1786 1786 elif destdirexists:
1787 1787 res = lambda p: os.path.join(
1788 1788 dest, os.path.basename(util.localpath(p))
1789 1789 )
1790 1790 else:
1791 1791 res = lambda p: dest
1792 1792 return res
1793 1793
1794 1794 # pat: ossep
1795 1795 # dest ossep
1796 1796 # srcs: list of (hgsep, hgsep, ossep, bool)
1797 1797 # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        """Return a function mapping a source path (hgsep) to its target
        path (ossep) for the ``--after`` case, where the files already
        exist on disk and we must guess which prefix was stripped."""
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(
                dest, os.path.basename(util.localpath(p))
            )
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist at the target
                    # location implied by stripping ``striplen`` characters
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(pycompat.ossep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    # both interpretations are plausible; pick whichever
                    # matches more of the existing files on disk
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(pycompat.ossep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(
                        dest, os.path.basename(util.localpath(p))
                    )
                else:
                    res = lambda p: dest
        return res
1838 1838
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        # multiple sources (or a pattern) require an existing directory target
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise error.InputError(
                _(
                    b'with multiple sources, destination must be an '
                    b'existing directory'
                )
            )
        if util.endswithsep(dest):
            raise error.InputError(
                _(b'destination %s is not a directory') % dest
            )

    # pick the path-mapping strategy: --after guesses from existing files
    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        hint = None
        if rename:
            hint = _(b'maybe you meant to use --after --at-rev=.')
        raise error.InputError(_(b'no files to copy'), hint=hint)

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            # copyfile() returns True on failure
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    # truthy return means at least one copy failed
    return errors != 0
1875 1875
1876 1876
## facility to let extensions process additional data into an import patch
# list of identifiers, executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to actual import function
#
# 'preimport' are run before the commit is made and are provided the following
# arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of in memory commit and more. Feel free to rework the code to get
# there.
extrapreimportmap = {}
# 'postimport' are run after the commit is made and are provided the following
# argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that updates a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(message, node, rejects)`` tuple: a status message, the
    created node (or None), and whether any hunks were rejected.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # an empty filename means there was nothing to import
    if not tmpname:
        return None, None, False

    rejects = False

    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullrev])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.InputError(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullrev]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullrev]
        except error.RepoError:
            # recorded parents are unknown locally; fall back to wdir parents
            p1, p2 = parents
        if p2.rev() == nullrev:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            # with --partial, rejected hunks are recorded instead of aborting
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: build the commit in memory without touching the wdir
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        # the recreated changeset must hash to the id recorded in the patch
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2100 2100
2101 2101
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to actual export function
# function has to return a string to be added to the header or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2109 2109
2110 2110
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit one "HG changeset patch" (header + diff) for ``ctx`` into ``fm``.

    The statement order below *is* the patch format; do not reorder the
    ``fm`` calls.  ``switch_parent`` diffs against the second parent of a
    merge instead of the first.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        parents.reverse()

    # the diff base: first (possibly switched) parent, or null for a root
    if parents:
        prev = parents[0]
    else:
        prev = repo.nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # labeled output for the terminal
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2154 2154
2155 2155
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Write the patches for ``revs`` to a single destination.

    ``dest`` is echoed via ``ui.note`` only when it names a real file;
    placeholder destinations (e.g. b'<unnamed>') start with b'<'.
    """
    seqno = 0
    for rev in revs:
        seqno += 1
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(
            repo, repo[rev], fm, match, switch_parent, seqno, diffopts
        )
2164 2164
2165 2165
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets, grouping revisions by their templated file name."""
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # filename -> [(seqno, rev), ...], kept in first-seen filename order
    filemap = util.sortdict()

    seqno = 0
    for rev in revs:
        seqno += 1
        dest = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest, entries in filemap.items():
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
2190 2190
2191 2191
def _prefetchchangedfiles(repo, revs, match):
    """Warm the file cache for every file touched by ``revs``.

    When ``match`` is given, only matching files are prefetched.
    """
    touched = {
        f
        for rev in revs
        for f in repo[rev].files()
        if not match or match(f)
    }
    narrowed = scmutil.matchfiles(repo, touched)
    scmutil.prefetchfiles(repo, [(rev, narrowed) for rev in revs])
2201 2201
2202 2202
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    """export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
        Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    """
    # Warm the file cache up front so per-revision diffing is fast.
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        # One (possibly shared) file per revision, named via the template.
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        # Everything goes to the formatter we were handed.
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2244 2244
2245 2245
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream"""
    _prefetchchangedfiles(repo, revs, match)

    with formatter.formatter(repo.ui, fp, b'export', {}) as fm:
        # fall back to a placeholder when the stream has no file name
        dest = getattr(fp, 'name', b'<unnamed>')
        _exportfile(repo, revs, fm, dest, switch_parent, opts, match)
2253 2253
2254 2254
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    The ``fm`` call order defines the output line layout; keep it stable.
    """
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is displayed separately above, so drop it from the metadata
    meta = marker.metadata().copy()
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2285 2285
2286 2286
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec.

    Returns the revision number as bytes; raises InputError when nothing
    matches.
    """
    matching = repo.revs(b'date(%s)', date)
    try:
        # max() raises ValueError on an empty revset
        tiprev = matching.max()
    except ValueError:
        raise error.InputError(_(b"revision matching date not found"))

    when = dateutil.datestr(repo[tiprev].date())
    ui.status(_(b"found revision %d from %s\n") % (tiprev, when))
    return b'%d' % tiprev
2300 2300
2301 2301
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Schedule matching unknown files for addition; returns the list of
    files that could not be added (including failures from subrepos)."""
    bad = []

    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit filenames for case collisions on case-insensitive filesystems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # note: 'and' binds tighter than 'or' -- an exact name is always
        # added, others only when untracked and present on disk
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            # without --subrepos, recurse in "explicit only" mode
            if opts.get('subrepos'):
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        # only report rejections for files the user named explicitly
        bad.extend(f for f in rejected if f in match.files())
    return bad
2360 2360
2361 2361
def addwebdirpath(repo, serverpath, webconf):
    """Publish ``repo`` under ``serverpath`` in ``webconf`` and recurse into
    every subrepository recorded by any revision's ``.hgsub`` file."""
    webconf[serverpath] = repo.root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, repo.root))

    # walk all revisions that touched .hgsub, not just the current one
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2370 2370
2371 2371
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking matching files without deleting them.

    Returns a ``(bad, forgot)`` pair: files that could not be forgotten and
    files that were actually forgotten (both including subrepo paths).
    """
    if dryrun and interactive:
        raise error.InputError(
            _(b"cannot specify both --dry-run and --interactive")
        )
    bad = []
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            # re-qualify subrepo results with the subrepo path
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        # warn about explicitly named files that are not tracked
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy since 'forget' is mutated inside the loop
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2471 2471
2472 2472
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """Print the files in ``ctx`` matched by ``m``, recursing into subrepos.

    Returns 0 if at least one file was listed, 1 otherwise.
    """
    ret = 1

    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path. The speed-up comes from skipping the formatter, and batching
        # calls to ui.write.
        buf = []
        for f in ctx.matches(m):
            buf.append(fmt % uipathfn(f))
            if len(buf) > 100:
                ui.write(b''.join(buf))
                del buf[:]
            ret = 0
        # flush whatever is left in the batch buffer
        if buf:
            ui.write(b''.join(buf))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fc = ctx[f]
                fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
2519 2519
2520 2520
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Schedule matching files for removal (``hg remove``).

    ``after`` only records files already deleted from disk; ``force`` also
    removes modified/added files.  ``warnings`` may be passed in by a
    recursing caller (subrepo) -- in that case the warnings are appended to
    it instead of being printed here.

    Returns 1 if anything was skipped or failed, 0 otherwise.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # a caller provided the list; it is responsible for printing
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    # decide which files actually get removed (renamed from 'list', which
    # shadowed the builtin)
    if force:
        to_remove = modified + deleted + clean + added
    elif after:
        to_remove = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        to_remove = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    to_remove = sorted(to_remove)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(to_remove), unit=_(b'files')
    )
    for f in to_remove:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                # the config value is loop-invariant; read it once
                rmdir = repo.ui.configbool(
                    b'experimental', b'removeemptydirs'
                )
                for f in to_remove:
                    if f in added:
                        continue  # we never unlink added files on remove
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(to_remove)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2670 2670
2671 2671
2672 2672 def _catfmtneedsdata(fm):
2673 2673 return not fm.datahint() or b'data' in fm.datahint()
2674 2674
2675 2675
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must
    call this method first.
    """
    # Fetching contents can be expensive (e.g. lfs), so only do it when the
    # formatter actually asked for the 'data' field.
    if _catfmtneedsdata(fm):
        raw = ctx[path].data()
        data = ctx.repo().wwritedata(path, raw) if decode else raw
    else:
        data = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2693 2693
2694 2694
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out the contents of matching files (``hg cat``).

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit one file, to basefm or to a templated per-file destination
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            # file not in this revision's manifest; fall through to the walk
            pass

    if _catfmtneedsdata(basefm):
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            # sub.cat() returns 0-ish on success
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2755 2755
2756 2756
def commit(ui, repo, commitfunc, pats, opts):
    """Commit the matched files (or all outstanding changes) via *commitfunc*.

    Optionally performs an addremove pass first, protected by a dirstate
    guard so that a failure leaves the dirstate untouched.
    """
    rawdate = opts.get(b'date')
    if rawdate:
        opts[b'date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    guard = None
    if opts.get(b'addremove'):
        guard = dirstateguard.dirstateguard(repo, b'commit')
    with guard or util.nullcontextmanager():
        if guard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

        return commitfunc(ui, repo, message, matcher, opts)
2780 2780
2781 2781
def samefile(f, ctx1, ctx2):
    """Return True iff *f* is identical (content and flags) in both contexts.

    A file absent from both manifests counts as identical.
    """
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if not in1:
        # Missing from ctx1: identical only when also missing from ctx2.
        return not in2
    if not in2:
        return False
    a = ctx1.filectx(f)
    b = ctx2.filectx(f)
    return not a.cmp(b) and a.flags() == b.flags()
2792 2792
2793 2793
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset *old* to include matched working-copy changes.

    Returns the node of the replacement changeset, or ``old.node()`` when
    the amend would not change anything.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx o - workingctx that contains changes from working copy
        # | to go into amending commit
        # |
        # old o - changeset to amend
        # |
        # base o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolve_commit_options(ui, opts)
        opts = pycompat.byteskwargs(opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        # Only working-copy files selected by the matcher take part in the
        # amend; everything else keeps its content from `old`.
        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # NOTE(review): ``old.p2`` is a bound method and therefore always
            # truthy — this condition likely intended ``old.p2()``; confirm
            # against upstream before relying on it.
            if old.p2:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        # Keep a pristine copy of extra for the no-op comparison below;
        # amend_source would otherwise always make it differ.
        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)
        ms.reset()

        with repo.dirstate.parentchange():
            # Reroute the working copy parent to the new changeset
            repo.setparents(newid, repo.nullid)

            # Fixing the dirstate because localrepo.commitctx does not update
            # it. This is rather convenient because we did not need to update
            # the dirstate for all the files in the new commit which commitctx
            # could have done if it updated the dirstate. Now, we can
            # selectively update the dirstate only for the amended files.
            dirstate = repo.dirstate

            # Update the state of the files which were added and modified in the
            # amend to "normal" in the dirstate. We need to use "normallookup" since
            # the files may have changed since the command started; using "normal"
            # would mark them as clean but with uncommitted contents.
            normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
            for f in normalfiles:
                dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )

            # Update the state of files which were removed in the amend
            # to "removed" in the dirstate.
            removedfiles = set(wctx.removed()) & filestoamend
            for f in removedfiles:
                dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

        # Mark the old node as superseded by the new one (obsolescence or
        # strip, depending on configuration).
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        return newid
3027 3027
3028 3028
def commiteditor(repo, ctx, subs, editform=b''):
    """Return *ctx*'s existing description, or run the commit editor if empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3035 3035
3036 3036
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Run the user's editor to obtain a commit message for *ctx*.

    The editor buffer is pre-filled from a matching ``[committemplate]``
    entry when one is configured (looked up from the most specific editform
    down to ``changeset``), otherwise from the default commit text.  Lines
    starting with ``HG:`` and anything below the special cut line are
    stripped from the result.  Raises InputError when the resulting message
    is empty or, with *unchangedmessagedetection*, identical to the rendered
    template.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # Try committemplate entries from the most to the least specific
    # editform (e.g. changeset.commit.amend -> changeset.commit -> changeset).
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # Drop the HG: helper lines that were added to the buffer.
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.InputError(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.InputError(_(b"commit message unchanged"))

    return text
3100 3100
3101 3101
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the ``[committemplate]`` entry *ref* for *ctx* and return it."""
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    tmpl = logcmdutil.changesettemplater(ui, repo, spec)
    # Make every [committemplate] config entry available to the template,
    # unquoting values so they can be written quoted in the config file.
    unquoted = {
        key: templater.unquotestring(value)
        for key, value in repo.ui.configitems(b'committemplate')
    }
    tmpl.t.cache.update(unquoted)

    if not extramsg:
        extramsg = b''  # ensure that extramsg is string

    ui.pushbuffer()
    tmpl.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3117 3117
3118 3118
def hgprefix(msg):
    """Prefix every non-empty line of *msg* with ``HG: ``, dropping blanks."""
    kept = (line for line in msg.split(b"\n") if line)
    return b"\n".join(b"HG: " + line for line in kept)
3121 3121
3122 3122
def buildcommittext(repo, ctx, subs, extramsg):
    """Build the default editor buffer text for committing *ctx*."""
    lines = []
    add = lines.append
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    desc = ctx.description()
    if desc:
        add(desc)
    add(b"")
    add(b"")  # Empty line between message and comments.
    add(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    add(hgprefix(extramsg))
    add(b"HG: --")
    add(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        add(hgprefix(_(b"branch merge")))
    if ctx.branch():
        add(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        add(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    for s in subs:
        add(hgprefix(_(b"subrepo %s") % s))
    for f in added:
        add(hgprefix(_(b"added %s") % f))
    for f in modified:
        add(hgprefix(_(b"changed %s") % f))
    for f in removed:
        add(hgprefix(_(b"removed %s") % f))
    if not (added or modified or removed):
        add(hgprefix(_(b"no files changed")))
    add(b"")

    return b"\n".join(lines)
3156 3156
3157 3157
def commitstatus(repo, node, branch, bheads=None, tip=None, opts=None):
    """Print informational messages after committing *node* on *branch*.

    Warns when the commit already existed (detected via an unchanged *tip*),
    reports a newly created head relative to *bheads*, notes reopened closed
    branch heads, and echoes the committed changeset in verbose/debug mode.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    if tip is not None and repo.changelog.tip() == tip:
        # avoid reporting something like "committed new head" when
        # recommitting old changesets, and issue a helpful warning
        # for most instances
        repo.ui.warn(_(b"warning: commit already existed in the repository!\n"))
    elif (
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not any(
            p.node() in bheads and p.branch() == branch for p in parents
        )
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        # N: null or no parent
        # B: parent is on another named branch
        # C: parent is a regular non head changeset
        # H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get(b'close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % r.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3220 3220
3221 3221
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status restricted to *pats*."""
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3224 3224
3225 3225
def revert(ui, repo, ctx, *pats, **opts):
    """Restore files matched by *pats* to their state in target revision *ctx*.

    Every matched file is classified against both the target revision and
    the dirstate, then dispatched to an action (revert/add/remove/drop/
    forget/undelete/noop/unknown) with an appropriate backup strategy,
    honoring the --dry-run, --interactive and --no-backup options.
    Matching subrepositories are reverted recursively.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    # <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # Suppress "not found" warnings for paths already collected,
                # for subrepos, and for directories that contain collected
                # files.
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != repo.nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if (
                src
                and src not in names
                and repo.dirstate.get_entry(src).removed
            ):
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if not repo.dirstate.get_entry(abs).added:
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate.get_entry(abs).added:
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present at
        # the same path. If a such file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            # file state
            # action
            # make backup
            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since targe, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abs, exact in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                            absbakname = scmutil.backuppath(ui, repo, abs)
                            bakname = os.path.relpath(
                                absbakname, start=repo.root
                            )
                            ui.note(
                                _(b'saving current version of %s as %s\n')
                                % (uipathfn(abs), uipathfn(bakname))
                            )
                            if not opts.get(b'dry_run'):
                                if interactive:
                                    util.copyfile(target, absbakname)
                                else:
                                    util.rename(target, absbakname)
                if opts.get(b'dry_run'):
                    if ui.verbose or not exact:
                        ui.status(msg % uipathfn(abs))
                elif exact:
                    ui.warn(msg % uipathfn(abs))
                break

        if not opts.get(b'dry_run'):
            # Prefetch the contents needed by the content-changing actions
            # before performing them.
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo,
                [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3541 3541
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    ``names`` maps file path -> whether the file was named exactly by the
    user (used to decide whether to print a status message); ``actions`` maps
    action name -> ([files], message-template); ``tobackup`` is a mutable set
    of files whose working copies must be backed up before being overwritten.
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    # files the user declined to touch in interactive mode
    excluded_files = []

    def checkout(f):
        # materialize f's content/flags from the target context into the
        # working directory
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # file may already be gone; removal is best-effort
            pass
        repo.dirstate.set_untracked(f)

    def prntstatusmsg(action, f):
        # only report files the user did not name explicitly (or always,
        # when verbose)
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.set_untracked(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.set_untracked(f)
    for f in actions[b'remove'][0]:
        # refuse to operate on paths escaping the repo (symlink tricks etc.)
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.set_untracked(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != repo.nullid:
            normal = repo.dirstate.set_tracked
        else:
            normal = repo.dirstate.set_clean

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        # diff direction depends on whether we apply the target's changes or
        # discard/keep local ones
        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        original_headers = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, original_headers, match, operation=operation
            )
            if operation == b'discard':
                # user selected hunks to discard; invert them so applying
                # the patch removes those changes
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(chunks)
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.set_tracked(f)

    normal = repo.dirstate.set_tracked
    if node == parent and p2 == repo.nullid:
        normal = repo.dirstate.set_clean
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    # re-establish copy records so status/commit keep tracking renames
    copied = copies.pathcopies(repo[parent], ctx)

    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3739 3739
# Extension hook points: extensions append callables to these lists and the
# corresponding commands invoke them.

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
# (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
# - (sourceurl, sourcebranch, sourcepeer, incoming)
# - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3757 3757
3758 3758
def checkunfinished(repo, commit=False, skipmerge=False):
    """Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    """

    def _abort_if_active(state):
        # raise with the operation's own message/hint when it is in progress
        if state.isunfinished(repo):
            raise error.StateError(state.msg(), hint=state.hint())

    # Check for non-clearable states first, so things like rebase will take
    # precedence over update.
    for st in statemod._unfinishedstates:
        exempt = st._clearable or (commit and st._allowcommit) or st._reportonly
        if not exempt:
            _abort_if_active(st)

    # Then the clearable ones (merge may be explicitly skipped by the caller).
    for st in statemod._unfinishedstates:
        exempt = (
            not st._clearable
            or (commit and st._allowcommit)
            or (st._opname == b'merge' and skipmerge)
            or st._reportonly
        )
        if not exempt:
            _abort_if_active(st)
3786 3786
3787 3787
def clearunfinished(repo):
    """Check for unfinished operations (as above), and clear the ones
    that are clearable.
    """
    # First pass: an active non-clearable operation is a hard error.
    for st in statemod._unfinishedstates:
        if st._reportonly:
            continue
        if not st._clearable and st.isunfinished(repo):
            raise error.StateError(st.msg(), hint=st.hint())

    # Second pass: delete the state files of active clearable operations
    # (merge state is never cleared here).
    for st in statemod._unfinishedstates:
        if st._reportonly or st._opname == b'merge':
            continue
        if st._clearable and st.isunfinished(repo):
            util.unlink(repo.vfs.join(st._fname))
3803 3803
3804 3804
def getunfinishedstate(repo):
    """Checks for unfinished operations and returns statecheck object
    for it"""
    # return the first active state, or None when nothing is in progress
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3812 3812
3813 3813
def howtocontinue(repo):
    """Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates list is checked for an unfinished operation
    and the corresponding message to finish it is generated if a method to
    continue is supported by the operation.

    Returns a (msg, warning) tuple. 'msg' is a string and 'warning' is
    a boolean.
    """
    template = _(b"continue: %s")
    # first continuable operation that is actually in progress wins
    for st in statemod._unfinishedstates:
        if st._continueflag and st.isunfinished(repo):
            return template % st.continuemsg(), True
    # no operation in progress: a dirty working directory still suggests
    # a commit, but only as a note (warning=False)
    if repo[None].dirty(missing=True, merge=False, branch=False):
        return template % _(b"hg commit"), False
    return None, None
3834 3834
3835 3835
def checkafterresolved(repo):
    """Inform the user about the next action after completing hg resolve

    If there's a an unfinished operation that supports continue flag,
    howtocontinue will yield repo.ui.warn as the reporter.

    Otherwise, it will yield repo.ui.note.
    """
    msg, is_warning = howtocontinue(repo)
    if msg is None:
        return
    # warn for real unfinished operations, quietly note otherwise
    emit = repo.ui.warn if is_warning else repo.ui.note
    emit(b"%s\n" % msg)
3850 3850
3851 3851
def wrongtooltocontinue(repo, task):
    """Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    """
    msg, warning = howtocontinue(repo)
    # only surface the continue message as a hint for genuine operations
    hint = msg if warning else None
    raise error.StateError(_(b'no %s in progress') % task, hint=hint)
3866 3866
3867 3867
def abortgraft(ui, repo, graftstate):
    """Abort an interrupted graft and roll back to the pre-graft state.

    Returns 0 on success. Raises StateError if no graft is in progress and
    Abort if the on-disk graft state predates the 'newnodes' record needed
    to undo safely. Newly grafted changesets are stripped only when it is
    safe (all still mutable and nothing was committed on top of them);
    otherwise they are kept and the working directory is left where it is.
    """
    if not graftstate.exists():
        raise error.StateError(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # an old graft state which does not have all the data required to
        # abort the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

    if cleanup:
        # update away from the grafted nodes before stripping them
        with repo.wlock(), repo.lock():
            mergemod.clean_update(startctx)
            # stripping the new nodes created
            strippoints = [
                c.node() for c in repo.set(b"roots(%ld)", newnodes)
            ]
            repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
3930 3930
3931 3931
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """read the graft state file and return a dict of the data stored in it"""
    try:
        return graftstate.read()
    except error.CorruptedState:
        # fall back to the legacy format: one node hash per line
        nodes = repo.vfs.read(b'graftstate').splitlines()
        return {b'nodes': nodes}
3940 3940
3941 3941
def hgabortgraft(ui, repo):
    """abort logic for aborting graft using 'hg abort'"""
    with repo.wlock():
        # delegate to abortgraft with the on-disk graft state handle
        state = statemod.cmdstate(repo, b'graftstate')
        return abortgraft(ui, repo, state)
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now