pycompat: drop usage of hasattr/getattr/setattr/delattr proxy...
marmoute
r51822:18c8c189 default

The requested changes are too big and the content was truncated.
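
The diff below drops imports of the getattr/setattr proxies that mercurial.pycompat kept from the Python 2 to 3 transition; the call sites keep working because, with the imports gone, the bare names resolve to the Python builtins. A minimal standalone sketch of the before/after pattern (``Config`` is an illustrative class, not a Mercurial API)::

    # Sketch only: `Config` is made up, not a Mercurial API.
    # Before, modules wrote `from mercurial.pycompat import getattr`;
    # after this change the plain builtins are used directly.
    class Config:
        pass

    cfg = Config()
    setattr(cfg, 'cache', True)                   # builtin setattr
    assert getattr(cfg, 'cache', False) is True   # builtin getattr
    assert getattr(cfg, 'missing', None) is None  # default fallback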

@@ -1,332 +1,331 @@
1 1 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
2 2 #
3 3 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import errno
9 9 import os
10 10 import re
11 11 import socket
12 12
13 13 from mercurial.i18n import _
14 14 from mercurial.pycompat import (
15 getattr,
16 15 open,
17 16 )
18 17 from mercurial import (
19 18 encoding,
20 19 error,
21 20 util,
22 21 )
23 22 from mercurial.utils import (
24 23 dateutil,
25 24 procutil,
26 25 )
27 26
28 27 from . import (
29 28 common,
30 29 cvsps,
31 30 )
32 31
33 32 stringio = util.stringio
34 33 checktool = common.checktool
35 34 commit = common.commit
36 35 converter_source = common.converter_source
37 36 makedatetimestamp = common.makedatetimestamp
38 37 NoRepo = common.NoRepo
39 38
40 39
41 40 class convert_cvs(converter_source):
42 41 def __init__(self, ui, repotype, path, revs=None):
43 42 super(convert_cvs, self).__init__(ui, repotype, path, revs=revs)
44 43
45 44 cvs = os.path.join(path, b"CVS")
46 45 if not os.path.exists(cvs):
47 46 raise NoRepo(_(b"%s does not look like a CVS checkout") % path)
48 47
49 48 checktool(b'cvs')
50 49
51 50 self.changeset = None
52 51 self.files = {}
53 52 self.tags = {}
54 53 self.lastbranch = {}
55 54 self.socket = None
56 55 self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1]
57 56 self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1]
58 57 self.encoding = encoding.encoding
59 58
60 59 self._connect()
61 60
62 61 def _parse(self):
63 62 if self.changeset is not None:
64 63 return
65 64 self.changeset = {}
66 65
67 66 maxrev = 0
68 67 if self.revs:
69 68 if len(self.revs) > 1:
70 69 raise error.Abort(
71 70 _(
72 71 b'cvs source does not support specifying '
73 72 b'multiple revs'
74 73 )
75 74 )
76 75 # TODO: handle tags
77 76 try:
78 77 # patchset number?
79 78 maxrev = int(self.revs[0])
80 79 except ValueError:
81 80 raise error.Abort(
82 81 _(b'revision %s is not a patchset number') % self.revs[0]
83 82 )
84 83
85 84 d = encoding.getcwd()
86 85 try:
87 86 os.chdir(self.path)
88 87
89 88 cache = b'update'
90 89 if not self.ui.configbool(b'convert', b'cvsps.cache'):
91 90 cache = None
92 91 db = cvsps.createlog(self.ui, cache=cache)
93 92 db = cvsps.createchangeset(
94 93 self.ui,
95 94 db,
96 95 fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')),
97 96 mergeto=self.ui.config(b'convert', b'cvsps.mergeto'),
98 97 mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'),
99 98 )
100 99
101 100 for cs in db:
102 101 if maxrev and cs.id > maxrev:
103 102 break
104 103 id = b"%d" % cs.id
105 104 cs.author = self.recode(cs.author)
106 105 self.lastbranch[cs.branch] = id
107 106 cs.comment = self.recode(cs.comment)
108 107 if self.ui.configbool(b'convert', b'localtimezone'):
109 108 cs.date = makedatetimestamp(cs.date[0])
110 109 date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2')
111 110 self.tags.update(dict.fromkeys(cs.tags, id))
112 111
113 112 files = {}
114 113 for f in cs.entries:
115 114 files[f.file] = b"%s%s" % (
116 115 b'.'.join([(b"%d" % x) for x in f.revision]),
117 116 [b'', b'(DEAD)'][f.dead],
118 117 )
119 118
120 119 # add current commit to set
121 120 c = commit(
122 121 author=cs.author,
123 122 date=date,
124 123 parents=[(b"%d" % p.id) for p in cs.parents],
125 124 desc=cs.comment,
126 125 branch=cs.branch or b'',
127 126 )
128 127 self.changeset[id] = c
129 128 self.files[id] = files
130 129
131 130 self.heads = self.lastbranch.values()
132 131 finally:
133 132 os.chdir(d)
134 133
135 134 def _connect(self):
136 135 root = self.cvsroot
137 136 conntype = None
138 137 user, host = None, None
139 138 cmd = [b'cvs', b'server']
140 139
141 140 self.ui.status(_(b"connecting to %s\n") % root)
142 141
143 142 if root.startswith(b":pserver:"):
144 143 root = root[9:]
145 144 m = re.match(
146 145 br'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root
147 146 )
148 147 if m:
149 148 conntype = b"pserver"
150 149 user, passw, serv, port, root = m.groups()
151 150 if not user:
152 151 user = b"anonymous"
153 152 if not port:
154 153 port = 2401
155 154 else:
156 155 port = int(port)
157 156 format0 = b":pserver:%s@%s:%s" % (user, serv, root)
158 157 format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root)
159 158
160 159 if not passw:
161 160 passw = b"A"
162 161 cvspass = os.path.expanduser(b"~/.cvspass")
163 162 try:
164 163 pf = open(cvspass, b'rb')
165 164 for line in pf.read().splitlines():
166 165 part1, part2 = line.split(b' ', 1)
167 166 # /1 :pserver:user@example.com:2401/cvsroot/foo
168 167 # Ah<Z
169 168 if part1 == b'/1':
170 169 part1, part2 = part2.split(b' ', 1)
171 170 format = format1
172 171 # :pserver:user@example.com:/cvsroot/foo Ah<Z
173 172 else:
174 173 format = format0
175 174 if part1 == format:
176 175 passw = part2
177 176 break
178 177 pf.close()
179 178 except IOError as inst:
180 179 if inst.errno != errno.ENOENT:
181 180 if not getattr(inst, 'filename', None):
182 181 inst.filename = cvspass
183 182 raise
184 183
185 184 sck = socket.socket()
186 185 sck.connect((serv, port))
187 186 sck.send(
188 187 b"\n".join(
189 188 [
190 189 b"BEGIN AUTH REQUEST",
191 190 root,
192 191 user,
193 192 passw,
194 193 b"END AUTH REQUEST",
195 194 b"",
196 195 ]
197 196 )
198 197 )
199 198 if sck.recv(128) != b"I LOVE YOU\n":
200 199 raise error.Abort(_(b"CVS pserver authentication failed"))
201 200
202 201 self.writep = self.readp = sck.makefile('rwb')
203 202
204 203 if not conntype and root.startswith(b":local:"):
205 204 conntype = b"local"
206 205 root = root[7:]
207 206
208 207 if not conntype:
209 208 # :ext:user@host/home/user/path/to/cvsroot
210 209 if root.startswith(b":ext:"):
211 210 root = root[5:]
212 211 m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
213 212 # Do not take Windows path "c:\foo\bar" for a connection string
214 213 if os.path.isdir(root) or not m:
215 214 conntype = b"local"
216 215 else:
217 216 conntype = b"rsh"
218 217 user, host, root = m.group(1), m.group(2), m.group(3)
219 218
220 219 if conntype != b"pserver":
221 220 if conntype == b"rsh":
222 221 rsh = encoding.environ.get(b"CVS_RSH") or b"ssh"
223 222 if user:
224 223 cmd = [rsh, b'-l', user, host] + cmd
225 224 else:
226 225 cmd = [rsh, host] + cmd
227 226
228 227 # popen2 does not support argument lists under Windows
229 228 cmd = b' '.join(procutil.shellquote(arg) for arg in cmd)
230 229 self.writep, self.readp = procutil.popen2(cmd)
231 230
232 231 self.realroot = root
233 232
234 233 self.writep.write(b"Root %s\n" % root)
235 234 self.writep.write(
236 235 b"Valid-responses ok error Valid-requests Mode"
237 236 b" M Mbinary E Checked-in Created Updated"
238 237 b" Merged Removed\n"
239 238 )
240 239 self.writep.write(b"valid-requests\n")
241 240 self.writep.flush()
242 241 r = self.readp.readline()
243 242 if not r.startswith(b"Valid-requests"):
244 243 raise error.Abort(
245 244 _(
246 245 b'unexpected response from CVS server '
247 246 b'(expected "Valid-requests", but got %r)'
248 247 )
249 248 % r
250 249 )
251 250 if b"UseUnchanged" in r:
252 251 self.writep.write(b"UseUnchanged\n")
253 252 self.writep.flush()
254 253 self.readp.readline()
255 254
256 255 def getheads(self):
257 256 self._parse()
258 257 return self.heads
259 258
260 259 def getfile(self, name, rev):
261 260 def chunkedread(fp, count):
262 261 # file-objects returned by socket.makefile() do not handle
263 262 # large read() requests very well.
264 263 chunksize = 65536
265 264 output = stringio()
266 265 while count > 0:
267 266 data = fp.read(min(count, chunksize))
268 267 if not data:
269 268 raise error.Abort(
270 269 _(b"%d bytes missing from remote file") % count
271 270 )
272 271 count -= len(data)
273 272 output.write(data)
274 273 return output.getvalue()
275 274
276 275 self._parse()
277 276 if rev.endswith(b"(DEAD)"):
278 277 return None, None
279 278
280 279 args = (b"-N -P -kk -r %s --" % rev).split()
281 280 args.append(self.cvsrepo + b'/' + name)
282 281 for x in args:
283 282 self.writep.write(b"Argument %s\n" % x)
284 283 self.writep.write(b"Directory .\n%s\nco\n" % self.realroot)
285 284 self.writep.flush()
286 285
287 286 data = b""
288 287 mode = None
289 288 while True:
290 289 line = self.readp.readline()
291 290 if line.startswith(b"Created ") or line.startswith(b"Updated "):
292 291 self.readp.readline() # path
293 292 self.readp.readline() # entries
294 293 mode = self.readp.readline()[:-1]
295 294 count = int(self.readp.readline()[:-1])
296 295 data = chunkedread(self.readp, count)
297 296 elif line.startswith(b" "):
298 297 data += line[1:]
299 298 elif line.startswith(b"M "):
300 299 pass
301 300 elif line.startswith(b"Mbinary "):
302 301 count = int(self.readp.readline()[:-1])
303 302 data = chunkedread(self.readp, count)
304 303 else:
305 304 if line == b"ok\n":
306 305 if mode is None:
307 306 raise error.Abort(_(b'malformed response from CVS'))
308 307 return (data, b"x" in mode and b"x" or b"")
309 308 elif line.startswith(b"E "):
310 309 self.ui.warn(_(b"cvs server: %s\n") % line[2:])
311 310 elif line.startswith(b"Remove"):
312 311 self.readp.readline()
313 312 else:
314 313 raise error.Abort(_(b"unknown CVS response: %s") % line)
315 314
316 315 def getchanges(self, rev, full):
317 316 if full:
318 317 raise error.Abort(_(b"convert from cvs does not support --full"))
319 318 self._parse()
320 319 return sorted(self.files[rev].items()), {}, set()
321 320
322 321 def getcommit(self, rev):
323 322 self._parse()
324 323 return self.changeset[rev]
325 324
326 325 def gettags(self):
327 326 self._parse()
328 327 return self.tags
329 328
330 329 def getchangedfiles(self, rev, i):
331 330 self._parse()
332 331 return sorted(self.files[rev])
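
In cvs.py only the ``getattr`` import from pycompat goes away; the remaining call ``getattr(inst, 'filename', None)`` behaves identically against the builtin. A hedged standalone sketch of that defensive attribute access on an I/O error (the path is illustrative)::

    # Sketch only; the path is made up for illustration.
    import errno

    path = '/nonexistent/.cvspass'
    try:
        open(path, 'rb')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            # some exceptions lack .filename; the None default avoids
            # an AttributeError before annotating and re-raising
            if not getattr(inst, 'filename', None):
                inst.filename = path
            raise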
@@ -1,159 +1,157 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
4 4 # This is a stripped-down version of the original bzr-svn transport.py,
5 5 # Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
6 6
7 7 # This program is free software; you can redistribute it and/or modify
8 8 # it under the terms of the GNU General Public License as published by
9 9 # the Free Software Foundation; either version 2 of the License, or
10 10 # (at your option) any later version.
11 11
12 12 # This program is distributed in the hope that it will be useful,
13 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 15 # GNU General Public License for more details.
16 16
17 17 # You should have received a copy of the GNU General Public License
18 18 # along with this program; if not, see <http://www.gnu.org/licenses/>.
19 19
20 20 # pytype: disable=import-error
21 21 import svn.client
22 22 import svn.core
23 23 import svn.ra
24 24
25 25 # pytype: enable=import-error
26 26
27 27 Pool = svn.core.Pool
28 28 SubversionException = svn.core.SubversionException
29 29
30 from mercurial.pycompat import getattr
31
32 30 # Some older versions of the Python bindings need to be
33 31 # explicitly initialized. But what we want to do probably
34 32 # won't work worth a darn against those libraries anyway!
35 33 svn.ra.initialize()
36 34
37 35 svn_config = None
38 36
39 37
40 38 def _create_auth_baton(pool):
41 39 """Create a Subversion authentication baton."""
42 40 import svn.client # pytype: disable=import-error
43 41
44 42 # Give the client context baton a suite of authentication
45 43 # providers.
46 44 providers = [
47 45 svn.client.get_simple_provider(pool),
48 46 svn.client.get_username_provider(pool),
49 47 svn.client.get_ssl_client_cert_file_provider(pool),
50 48 svn.client.get_ssl_client_cert_pw_file_provider(pool),
51 49 svn.client.get_ssl_server_trust_file_provider(pool),
52 50 ]
53 51 # Platform-dependent authentication methods
54 52 getprovider = getattr(
55 53 svn.core, 'svn_auth_get_platform_specific_provider', None
56 54 )
57 55 if getprovider:
58 56 # Available in svn >= 1.6
59 57 for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
60 58 for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
61 59 p = getprovider(name, type, pool)
62 60 if p:
63 61 providers.append(p)
64 62 else:
65 63 if hasattr(svn.client, 'get_windows_simple_provider'):
66 64 providers.append(svn.client.get_windows_simple_provider(pool))
67 65
68 66 return svn.core.svn_auth_open(providers, pool)
69 67
70 68
71 69 class NotBranchError(SubversionException):
72 70 pass
73 71
74 72
75 73 class SvnRaTransport:
76 74 """
77 75 Open an ra connection to a Subversion repository.
78 76 """
79 77
80 78 def __init__(self, url=b"", ra=None):
81 79 self.pool = Pool()
82 80 self.svn_url = url
83 81 self.username = b''
84 82 self.password = b''
85 83
86 84 # Only Subversion 1.4 has reparent()
87 85 if ra is None or not hasattr(svn.ra, 'reparent'):
88 86 self.client = svn.client.create_context(self.pool)
89 87 ab = _create_auth_baton(self.pool)
90 88 self.client.auth_baton = ab
91 89 global svn_config
92 90 if svn_config is None:
93 91 svn_config = svn.core.svn_config_get_config(None)
94 92 self.client.config = svn_config
95 93 try:
96 94 self.ra = svn.client.open_ra_session(
97 95 self.svn_url, self.client, self.pool
98 96 )
99 97 except SubversionException as xxx_todo_changeme:
100 98 (inst, num) = xxx_todo_changeme.args
101 99 if num in (
102 100 svn.core.SVN_ERR_RA_ILLEGAL_URL,
103 101 svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
104 102 svn.core.SVN_ERR_BAD_URL,
105 103 ):
106 104 raise NotBranchError(url)
107 105 raise
108 106 else:
109 107 self.ra = ra
110 108 svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
111 109
112 110 class Reporter:
113 111 def __init__(self, reporter_data):
114 112 self._reporter, self._baton = reporter_data
115 113
116 114 def set_path(self, path, revnum, start_empty, lock_token, pool=None):
117 115 svn.ra.reporter2_invoke_set_path(
118 116 self._reporter,
119 117 self._baton,
120 118 path,
121 119 revnum,
122 120 start_empty,
123 121 lock_token,
124 122 pool,
125 123 )
126 124
127 125 def delete_path(self, path, pool=None):
128 126 svn.ra.reporter2_invoke_delete_path(
129 127 self._reporter, self._baton, path, pool
130 128 )
131 129
132 130 def link_path(
133 131 self, path, url, revision, start_empty, lock_token, pool=None
134 132 ):
135 133 svn.ra.reporter2_invoke_link_path(
136 134 self._reporter,
137 135 self._baton,
138 136 path,
139 137 url,
140 138 revision,
141 139 start_empty,
142 140 lock_token,
143 141 pool,
144 142 )
145 143
146 144 def finish_report(self, pool=None):
147 145 svn.ra.reporter2_invoke_finish_report(
148 146 self._reporter, self._baton, pool
149 147 )
150 148
151 149 def abort_report(self, pool=None):
152 150 svn.ra.reporter2_invoke_abort_report(
153 151 self._reporter, self._baton, pool
154 152 )
155 153
156 154 def do_update(self, revnum, path, *args, **kwargs):
157 155 return self.Reporter(
158 156 svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
159 157 )
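
transport.py relied on the classic optional-API probe: ``getattr(module, name, None)`` for newer bindings, ``hasattr`` for a platform fallback. Both are builtins, so the pycompat import was dead weight. A sketch under stated assumptions, with ``math.isqrt`` standing in for ``svn_auth_get_platform_specific_provider``::

    # Sketch only; math.isqrt stands in for an optional SVN binding API.
    import math

    # returns None instead of raising when the binding is too old
    probe = getattr(math, 'isqrt', None)
    if probe:
        print(probe(17))          # -> 4
    elif hasattr(math, 'sqrt'):   # older-API fallback
        print(int(math.sqrt(17)))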
@@ -1,161 +1,160 @@
1 1 # factotum.py - Plan 9 factotum integration for Mercurial
2 2 #
3 3 # Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
4 4 #
5 5 # This program is free software; you can redistribute it and/or modify it
6 6 # under the terms of the GNU General Public License as published by the
7 7 # Free Software Foundation; either version 2 of the License, or (at your
8 8 # option) any later version.
9 9 #
10 10 # This program is distributed in the hope that it will be useful, but
11 11 # WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
13 13 # Public License for more details.
14 14 #
15 15 # You should have received a copy of the GNU General Public License along
16 16 # with this program; if not, write to the Free Software Foundation, Inc.,
17 17 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18 18
19 19 '''http authentication with factotum
20 20
21 21 This extension allows the factotum(4) facility on Plan 9 from Bell Labs
22 22 platforms to provide authentication information for HTTP access. Configuration
23 23 entries specified in the auth section as well as authentication information
24 24 provided in the repository URL are fully supported. If no prefix is specified,
25 25 a value of "*" will be assumed.
26 26
27 27 By default, keys are specified as::
28 28
29 29 proto=pass service=hg prefix=<prefix> user=<username> !password=<password>
30 30
31 31 If the factotum extension is unable to read the required key, one will be
32 32 requested interactively.
33 33
34 34 A configuration section is available to customize runtime behavior. By
35 35 default, these entries are::
36 36
37 37 [factotum]
38 38 executable = /bin/auth/factotum
39 39 mountpoint = /mnt/factotum
40 40 service = hg
41 41
42 42 The executable entry defines the full path to the factotum binary. The
43 43 mountpoint entry defines the path to the factotum file service. Lastly, the
44 44 service entry controls the service name used when reading keys.
45 45
46 46 '''
47 47
48 48
49 49 import os
50 50 from mercurial.i18n import _
51 from mercurial.pycompat import setattr
52 51 from mercurial.utils import procutil
53 52 from mercurial import (
54 53 error,
55 54 httpconnection,
56 55 registrar,
57 56 url,
58 57 util,
59 58 )
60 59
61 60 urlreq = util.urlreq
62 61 passwordmgr = url.passwordmgr
63 62
64 63 ERRMAX = 128
65 64
66 65 _executable = _mountpoint = _service = None
67 66
68 67 configtable = {}
69 68 configitem = registrar.configitem(configtable)
70 69
71 70 configitem(
72 71 b'factotum',
73 72 b'executable',
74 73 default=b'/bin/auth/factotum',
75 74 )
76 75 configitem(
77 76 b'factotum',
78 77 b'mountpoint',
79 78 default=b'/mnt/factotum',
80 79 )
81 80 configitem(
82 81 b'factotum',
83 82 b'service',
84 83 default=b'hg',
85 84 )
86 85
87 86
88 87 def auth_getkey(self, params):
89 88 if not self.ui.interactive():
90 89 raise error.Abort(_(b'factotum not interactive'))
91 90 if b'user=' not in params:
92 91 params = b'%s user?' % params
93 92 params = b'%s !password?' % params
94 93 os.system(procutil.tonativestr(b"%s -g '%s'" % (_executable, params)))
95 94
96 95
97 96 def auth_getuserpasswd(self, getkey, params):
98 97 params = b'proto=pass %s' % params
99 98 while True:
100 99 fd = os.open(b'%s/rpc' % _mountpoint, os.O_RDWR)
101 100 try:
102 101 os.write(fd, b'start %s' % params)
103 102 l = os.read(fd, ERRMAX).split()
104 103 if l[0] == b'ok':
105 104 os.write(fd, b'read')
106 105 status, user, passwd = os.read(fd, ERRMAX).split(None, 2)
107 106 if status == b'ok':
108 107 if passwd.startswith(b"'"):
109 108 if passwd.endswith(b"'"):
110 109 passwd = passwd[1:-1].replace(b"''", b"'")
111 110 else:
112 111 raise error.Abort(_(b'malformed password string'))
113 112 return (user, passwd)
114 113 except (OSError, IOError):
115 114 raise error.Abort(_(b'factotum not responding'))
116 115 finally:
117 116 os.close(fd)
118 117 getkey(self, params)
119 118
120 119
121 120 def monkeypatch_method(cls):
122 121 def decorator(func):
123 122 setattr(cls, func.__name__, func)
124 123 return func
125 124
126 125 return decorator
127 126
128 127
129 128 @monkeypatch_method(passwordmgr)
130 129 def find_user_password(self, realm, authuri):
131 130 user, passwd = self.passwddb.find_user_password(realm, authuri)
132 131 if user and passwd:
133 132 self._writedebug(user, passwd)
134 133 return (user, passwd)
135 134
136 135 prefix = b''
137 136 res = httpconnection.readauthforuri(self.ui, authuri, user)
138 137 if res:
139 138 _, auth = res
140 139 prefix = auth.get(b'prefix')
141 140 user, passwd = auth.get(b'username'), auth.get(b'password')
142 141 if not user or not passwd:
143 142 if not prefix:
144 143 prefix = realm.split(b' ')[0].lower()
145 144 params = b'service=%s prefix=%s' % (_service, prefix)
146 145 if user:
147 146 params = b'%s user=%s' % (params, user)
148 147 user, passwd = auth_getuserpasswd(self, auth_getkey, params)
149 148
150 149 self.add_password(realm, authuri, user, passwd)
151 150 self._writedebug(user, passwd)
152 151 return (user, passwd)
153 152
154 153
155 154 def uisetup(ui):
156 155 global _executable
157 156 _executable = ui.config(b'factotum', b'executable')
158 157 global _mountpoint
159 158 _mountpoint = ui.config(b'factotum', b'mountpoint')
160 159 global _service
161 160 _service = ui.config(b'factotum', b'service')
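
factotum.py's ``monkeypatch_method`` decorator needs nothing beyond the builtin ``setattr``, which is why its pycompat import could be dropped wholesale. A self-contained sketch of the same decorator applied to a made-up class (``Greeter`` is illustrative, not part of the extension)::

    # Sketch only; `Greeter` is made up for illustration.
    def monkeypatch_method(cls):
        def decorator(func):
            setattr(cls, func.__name__, func)  # builtin setattr
            return func
        return decorator

    class Greeter:
        pass

    @monkeypatch_method(Greeter)
    def greet(self):
        return 'hello'

    assert Greeter().greet() == 'hello'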
@@ -1,860 +1,858 @@
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # context: context needed to annotate a file
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import collections
10 10 import contextlib
11 11 import os
12 12
13 13 from mercurial.i18n import _
14 14 from mercurial.pycompat import (
15 getattr,
16 15 open,
17 setattr,
18 16 )
19 17 from mercurial.node import (
20 18 bin,
21 19 hex,
22 20 short,
23 21 )
24 22 from mercurial import (
25 23 error,
26 24 linelog as linelogmod,
27 25 lock as lockmod,
28 26 mdiff,
29 27 pycompat,
30 28 scmutil,
31 29 util,
32 30 )
33 31 from mercurial.utils import (
34 32 hashutil,
35 33 stringutil,
36 34 )
37 35
38 36 from . import (
39 37 error as faerror,
40 38 revmap as revmapmod,
41 39 )
42 40
43 41 # given path, get filelog, cached
44 42 @util.lrucachefunc
45 43 def _getflog(repo, path):
46 44 return repo.file(path)
47 45
48 46
49 47 # extracted from mercurial.context.basefilectx.annotate
50 48 def _parents(f, follow=True):
51 49 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
52 50 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
53 51 # from the topmost introrev (= srcrev) down to p.linkrev() if it
54 52 # isn't an ancestor of the srcrev.
55 53 f._changeid
56 54 pl = f.parents()
57 55
58 56 # Don't return renamed parents if we aren't following.
59 57 if not follow:
60 58 pl = [p for p in pl if p.path() == f.path()]
61 59
62 60 # renamed filectx won't have a filelog yet, so set it
63 61 # from the cache to save time
64 62 for p in pl:
65 63 if '_filelog' not in p.__dict__:
66 64 p._filelog = _getflog(f._repo, p.path())
67 65
68 66 return pl
69 67
70 68
71 69 # extracted from mercurial.context.basefilectx.annotate. slightly modified
72 70 # so it takes a fctx instead of a pair of text and fctx.
73 71 def _decorate(fctx):
74 72 text = fctx.data()
75 73 linecount = text.count(b'\n')
76 74 if text and not text.endswith(b'\n'):
77 75 linecount += 1
78 76 return ([(fctx, i) for i in range(linecount)], text)
79 77
80 78
81 79 # extracted from mercurial.context.basefilectx.annotate. slightly modified
82 80 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
83 81 # calculating diff here.
84 82 def _pair(parent, child, blocks):
85 83 for (a1, a2, b1, b2), t in blocks:
86 84 # Changed blocks ('!') or blocks made only of blank lines ('~')
87 85 # belong to the child.
88 86 if t == b'=':
89 87 child[0][b1:b2] = parent[0][a1:a2]
90 88 return child
91 89
92 90
93 91 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
94 92 # could be reused
95 93 _revsingle = util.lrucachefunc(scmutil.revsingle)
96 94
97 95
98 96 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
99 97 """(repo, str, str) -> fctx
100 98
101 99 get the filectx object from repo, rev, path, in an efficient way.
102 100
103 101 if resolverev is True, "rev" is a revision specified by the revset
104 102 language, otherwise "rev" is a nodeid, or a revision number that can
105 103 be consumed by repo.__getitem__.
106 104
107 105 if adjustctx is not None, the returned fctx will point to a changeset
108 106 that introduces the change (last modified the file). if adjustctx
109 107 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
110 108 faster for big repos but is incorrect for some cases.
111 109 """
112 110 if resolverev and not isinstance(rev, int) and rev is not None:
113 111 ctx = _revsingle(repo, rev)
114 112 else:
115 113 ctx = repo[rev]
116 114
117 115 # If we don't need to adjust the linkrev, create the filectx using the
118 116 # changectx instead of using ctx[path]. This means it already has the
119 117 # changectx information, so blame -u will be able to look directly at the
120 118 # commitctx object instead of having to resolve it by going through the
121 119 # manifest. In a lazy-manifest world this can prevent us from downloading a
122 120 # lot of data.
123 121 if adjustctx is None:
124 122 # ctx.rev() is None means it's the working copy, which is a special
125 123 # case.
126 124 if ctx.rev() is None:
127 125 fctx = ctx[path]
128 126 else:
129 127 fctx = repo.filectx(path, changeid=ctx.rev())
130 128 else:
131 129 fctx = ctx[path]
132 130 if adjustctx == b'linkrev':
133 131 introrev = fctx.linkrev()
134 132 else:
135 133 introrev = fctx.introrev()
136 134 if introrev != ctx.rev():
137 135 fctx._changeid = introrev
138 136 fctx._changectx = repo[introrev]
139 137 return fctx
140 138
141 139
142 140 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
143 141 def encodedir(path):
144 142 return (
145 143 path.replace(b'.hg/', b'.hg.hg/')
146 144 .replace(b'.l/', b'.l.hg/')
147 145 .replace(b'.m/', b'.m.hg/')
148 146 .replace(b'.lock/', b'.lock.hg/')
149 147 )
150 148
151 149
152 150 def hashdiffopts(diffopts):
153 151 diffoptstr = stringutil.pprint(
154 152 sorted(
155 153 (k, getattr(diffopts, pycompat.sysstr(k)))
156 154 for k in mdiff.diffopts.defaults
157 155 )
158 156 )
159 157 return hex(hashutil.sha1(diffoptstr).digest())[:6]
160 158
161 159
162 160 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
163 161
164 162
165 163 class annotateopts:
166 164 """like mercurial.mdiff.diffopts, but is for annotate
167 165
168 166 followrename: follow renames, like "hg annotate -f"
169 167 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
170 168 """
171 169
172 170 defaults = {
173 171 'diffopts': None,
174 172 'followrename': True,
175 173 'followmerge': True,
176 174 }
177 175
178 176 def __init__(self, **opts):
179 177 for k, v in self.defaults.items():
180 178 setattr(self, k, opts.get(k, v))
181 179
182 180 @util.propertycache
183 181 def shortstr(self):
184 182 """represent opts in a short string, suitable for a directory name"""
185 183 result = b''
186 184 if not self.followrename:
187 185 result += b'r0'
188 186 if not self.followmerge:
189 187 result += b'm0'
190 188 if self.diffopts is not None:
191 189 assert isinstance(self.diffopts, mdiff.diffopts)
192 190 diffopthash = hashdiffopts(self.diffopts)
193 191 if diffopthash != _defaultdiffopthash:
194 192 result += b'i' + diffopthash
195 193 return result or b'default'
196 194
197 195
198 196 defaultopts = annotateopts()
199 197
200 198
201 199 class _annotatecontext:
202 200 """do not use this class directly as it does not use lock to protect
203 201 writes. use "with annotatecontext(...)" instead.
204 202 """
205 203
206 204 def __init__(self, repo, path, linelogpath, revmappath, opts):
207 205 self.repo = repo
208 206 self.ui = repo.ui
209 207 self.path = path
210 208 self.opts = opts
211 209 self.linelogpath = linelogpath
212 210 self.revmappath = revmappath
213 211 self._linelog = None
214 212 self._revmap = None
215 213 self._node2path = {} # {str: str}
216 214
217 215 @property
218 216 def linelog(self):
219 217 if self._linelog is None:
220 218 if os.path.exists(self.linelogpath):
221 219 with open(self.linelogpath, b'rb') as f:
222 220 try:
223 221 self._linelog = linelogmod.linelog.fromdata(f.read())
224 222 except linelogmod.LineLogError:
225 223 self._linelog = linelogmod.linelog()
226 224 else:
227 225 self._linelog = linelogmod.linelog()
228 226 return self._linelog
229 227
230 228 @property
231 229 def revmap(self):
232 230 if self._revmap is None:
233 231 self._revmap = revmapmod.revmap(self.revmappath)
234 232 return self._revmap
235 233
236 234 def close(self):
237 235 if self._revmap is not None:
238 236 self._revmap.flush()
239 237 self._revmap = None
240 238 if self._linelog is not None:
241 239 with open(self.linelogpath, b'wb') as f:
242 240 f.write(self._linelog.encode())
243 241 self._linelog = None
244 242
245 243 __del__ = close
246 244
247 245 def rebuild(self):
248 246 """delete linelog and revmap, useful for rebuilding"""
249 247 self.close()
250 248 self._node2path.clear()
251 249 _unlinkpaths([self.revmappath, self.linelogpath])
252 250
253 251 @property
254 252 def lastnode(self):
255 253 """return last node in revmap, or None if revmap is empty"""
256 254 if self._revmap is None:
257 255 # fast path, read revmap without loading its full content
258 256 return revmapmod.getlastnode(self.revmappath)
259 257 else:
260 258 return self._revmap.rev2hsh(self._revmap.maxrev)
261 259
262 260 def isuptodate(self, master, strict=True):
263 261 """return True if the revmap / linelog is up-to-date, or the file
264 262 does not exist in the master revision. False otherwise.
265 263
266 264 it tries to be fast and could return false negatives, because of the
267 265 use of linkrev instead of introrev.
268 266
269 267 useful for both server and client to decide whether to update
270 268 fastannotate cache or not.
271 269
272 270 if strict is True, even if fctx exists in the revmap, but is not the
273 271 last node, isuptodate will return False. it's good for performance - no
274 272 expensive check was done.
275 273
276 274 if strict is False, if fctx exists in the revmap, this function may
277 275 return True. this is useful for the client to skip downloading the
278 276 cache if the client's master is behind the server's.
279 277 """
280 278 lastnode = self.lastnode
281 279 try:
282 280 f = self._resolvefctx(master, resolverev=True)
283 281 # choose linkrev instead of introrev as the check is meant to be
284 282 # *fast*.
285 283 linknode = self.repo.changelog.node(f.linkrev())
286 284 if not strict and lastnode and linknode != lastnode:
287 285 # check if f.node() is in the revmap. note: this loads the
288 286 # revmap and can be slow.
289 287 return self.revmap.hsh2rev(linknode) is not None
290 288 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
291 289 # false negatives are acceptable in this case.
292 290 return linknode == lastnode
293 291 except LookupError:
294 292 # master does not have the file, or the revmap is ahead
295 293 return True
296 294
297 295 def annotate(self, rev, master=None, showpath=False, showlines=False):
298 296 """incrementally update the cache so it includes revisions in the main
299 297 branch till 'master'. and run annotate on 'rev', which may or may not be
300 298 included in the main branch.
301 299
302 300 if master is None, do not update linelog.
303 301
304 302 the first value returned is the annotate result, it is [(node, linenum)]
305 303 by default. [(node, linenum, path)] if showpath is True.
306 304
307 305 if showlines is True, a second value will be returned, it is a list of
308 306 corresponding line contents.
309 307 """
310 308
311 309 # the fast path test requires commit hash, convert rev number to hash,
312 310 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
313 311 # command could give us a revision number even if the user passes a
314 312 # commit hash.
315 313 if isinstance(rev, int):
316 314 rev = hex(self.repo.changelog.node(rev))
317 315
318 316 # fast path: if rev is in the main branch already
319 317 directly, revfctx = self.canannotatedirectly(rev)
320 318 if directly:
321 319 if self.ui.debugflag:
322 320 self.ui.debug(
323 321 b'fastannotate: %s: using fast path '
324 322 b'(resolved fctx: %s)\n'
325 323 % (
326 324 self.path,
327 325 stringutil.pprint(hasattr(revfctx, 'node')),
328 326 )
329 327 )
330 328 return self.annotatedirectly(revfctx, showpath, showlines)
331 329
332 330 # resolve master
333 331 masterfctx = None
334 332 if master:
335 333 try:
336 334 masterfctx = self._resolvefctx(
337 335 master, resolverev=True, adjustctx=True
338 336 )
339 337 except LookupError: # master does not have the file
340 338 pass
341 339 else:
342 340 if masterfctx in self.revmap: # no need to update linelog
343 341 masterfctx = None
344 342
345 343 # ... - @ <- rev (can be an arbitrary changeset,
346 344 # / not necessarily a descendant
347 345 # master -> o of master)
348 346 # |
349 347 # a merge -> o 'o': new changesets in the main branch
350 348 # |\ '#': revisions in the main branch that
351 349 # o * exist in linelog / revmap
352 350 # | . '*': changesets in side branches, or
353 351 # last master -> # . descendants of master
354 352 # | .
355 353 # # * joint: '#', and is a parent of a '*'
356 354 # |/
357 355 # a joint -> # ^^^^ --- side branches
358 356 # |
359 357 # ^ --- main branch (in linelog)
360 358
361 359 # these DFSes are similar to the traditional annotate algorithm.
362 360 # we cannot really reuse the code for perf reason.
363 361
364 362 # 1st DFS calculates merges, joint points, and needed.
365 363 # "needed" is a simple reference counting dict to free items in
366 364 # "hist", reducing its memory usage otherwise could be huge.
367 365 initvisit = [revfctx]
368 366 if masterfctx:
369 367 if masterfctx.rev() is None:
370 368 raise error.Abort(
371 369 _(b'cannot update linelog to wdir()'),
372 370 hint=_(b'set fastannotate.mainbranch'),
373 371 )
374 372 initvisit.append(masterfctx)
375 373 visit = initvisit[:]
376 374 pcache = {}
377 375 needed = {revfctx: 1}
378 376 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
379 377 while visit:
380 378 f = visit.pop()
381 379 if f in pcache or f in hist:
382 380 continue
383 381 if f in self.revmap: # in the old main branch, it's a joint
384 382 llrev = self.revmap.hsh2rev(f.node())
385 383 self.linelog.annotate(llrev)
386 384 result = self.linelog.annotateresult
387 385 hist[f] = (result, f.data())
388 386 continue
389 387 pl = self._parentfunc(f)
390 388 pcache[f] = pl
391 389 for p in pl:
392 390 needed[p] = needed.get(p, 0) + 1
393 391 if p not in pcache:
394 392 visit.append(p)
395 393
396 394 # 2nd (simple) DFS calculates new changesets in the main branch
397 395 # ('o' nodes in the above graph), so we know when to update linelog.
398 396 newmainbranch = set()
399 397 f = masterfctx
400 398 while f and f not in self.revmap:
401 399 newmainbranch.add(f)
402 400 pl = pcache[f]
403 401 if pl:
404 402 f = pl[0]
405 403 else:
406 404 f = None
407 405 break
408 406
409 407 # f, if present, is the position where the last build stopped at, and
410 408 # should be the "master" last time. check to see if we can continue
411 409 # building the linelog incrementally. (we cannot if diverged)
412 410 if masterfctx is not None:
413 411 self._checklastmasterhead(f)
414 412
415 413 if self.ui.debugflag:
416 414 if newmainbranch:
417 415 self.ui.debug(
418 416 b'fastannotate: %s: %d new changesets in the main'
419 417 b' branch\n' % (self.path, len(newmainbranch))
420 418 )
421 419 elif not hist: # no joints, no updates
422 420 self.ui.debug(
423 421 b'fastannotate: %s: linelog cannot help in '
424 422 b'annotating this revision\n' % self.path
425 423 )
426 424
427 425 # prepare annotateresult so we can update linelog incrementally
428 426 self.linelog.annotate(self.linelog.maxrev)
429 427
430 428 # 3rd DFS does the actual annotate
431 429 visit = initvisit[:]
432 430 progress = self.ui.makeprogress(
433 431 b'building cache', total=len(newmainbranch)
434 432 )
435 433 while visit:
436 434 f = visit[-1]
437 435 if f in hist:
438 436 visit.pop()
439 437 continue
440 438
441 439 ready = True
442 440 pl = pcache[f]
443 441 for p in pl:
444 442 if p not in hist:
445 443 ready = False
446 444 visit.append(p)
447 445 if not ready:
448 446 continue
449 447
450 448 visit.pop()
451 449 blocks = None # mdiff blocks, used for appending linelog
452 450 ismainbranch = f in newmainbranch
453 451 # curr is the same as the traditional annotate algorithm,
454 452 # if we only care about linear history (do not follow merge),
455 453 # then curr is not actually used.
456 454 assert f not in hist
457 455 curr = _decorate(f)
458 456 for i, p in enumerate(pl):
459 457 bs = list(self._diffblocks(hist[p][1], curr[1]))
460 458 if i == 0 and ismainbranch:
461 459 blocks = bs
462 460 curr = _pair(hist[p], curr, bs)
463 461 if needed[p] == 1:
464 462 del hist[p]
465 463 del needed[p]
466 464 else:
467 465 needed[p] -= 1
468 466
469 467 hist[f] = curr
470 468 del pcache[f]
471 469
472 470 if ismainbranch: # need to write to linelog
473 471 progress.increment()
474 472 bannotated = None
475 473 if len(pl) == 2 and self.opts.followmerge: # merge
476 474 bannotated = curr[0]
477 475 if blocks is None: # no parents, add an empty one
478 476 blocks = list(self._diffblocks(b'', curr[1]))
479 477 self._appendrev(f, blocks, bannotated)
480 478 elif showpath: # not append linelog, but we need to record path
481 479 self._node2path[f.node()] = f.path()
482 480
483 481 progress.complete()
484 482
485 483 result = [
486 484 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
487 485 for fr, l in hist[revfctx][0]
488 486 ] # [(node, linenumber)]
489 487 return self._refineannotateresult(result, revfctx, showpath, showlines)
490 488
491 489 def canannotatedirectly(self, rev):
492 490 """(str) -> bool, fctx or node.
493 491 return (True, f) if we can annotate without updating the linelog, pass
494 492 f to annotatedirectly.
495 493 return (False, f) if we need extra calculation. f is the fctx resolved
496 494 from rev.
497 495 """
498 496 result = True
499 497 f = None
500 498 if not isinstance(rev, int) and rev is not None:
501 499 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
502 500 if hsh is not None and (hsh, self.path) in self.revmap:
503 501 f = hsh
504 502 if f is None:
505 503 adjustctx = b'linkrev' if self._perfhack else True
506 504 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
507 505 result = f in self.revmap
508 506 if not result and self._perfhack:
509 507 # redo the resolution without perfhack - as we are going to
510 508 # do write operations, we need a correct fctx.
511 509 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
512 510 return result, f
513 511
514 512 def annotatealllines(self, rev, showpath=False, showlines=False):
515 513 """(rev : str) -> [(node : str, linenum : int, path : str)]
516 514
517 515 the result has the same format with annotate, but include all (including
518 516 deleted) lines up to rev. call this after calling annotate(rev, ...) for
519 517 better performance and accuracy.
520 518 """
521 519 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
522 520
523 521 # find a chain from rev to anything in the mainbranch
524 522 if revfctx not in self.revmap:
525 523 chain = [revfctx]
526 524 a = b''
527 525 while True:
528 526 f = chain[-1]
529 527 pl = self._parentfunc(f)
530 528 if not pl:
531 529 break
532 530 if pl[0] in self.revmap:
533 531 a = pl[0].data()
534 532 break
535 533 chain.append(pl[0])
536 534
537 535 # both self.linelog and self.revmap is backed by filesystem. now
538 536 # we want to modify them but do not want to write changes back to
539 537 # files. so we create in-memory objects and copy them. it's like
540 538 # a "fork".
541 539 linelog = linelogmod.linelog()
542 540 linelog.copyfrom(self.linelog)
543 541 linelog.annotate(linelog.maxrev)
544 542 revmap = revmapmod.revmap()
545 543 revmap.copyfrom(self.revmap)
546 544
547 545 for f in reversed(chain):
548 546 b = f.data()
549 547 blocks = list(self._diffblocks(a, b))
550 548 self._doappendrev(linelog, revmap, f, blocks)
551 549 a = b
552 550 else:
553 551 # fastpath: use existing linelog, revmap as we don't write to them
554 552 linelog = self.linelog
555 553 revmap = self.revmap
556 554
557 555 lines = linelog.getalllines()
558 556 hsh = revfctx.node()
559 557 llrev = revmap.hsh2rev(hsh)
560 558 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
561 559 # cannot use _refineannotateresult since we need custom logic for
562 560 # resolving line contents
563 561 if showpath:
564 562 result = self._addpathtoresult(result, revmap)
565 563 if showlines:
566 564 linecontents = self._resolvelines(result, revmap, linelog)
567 565 result = (result, linecontents)
568 566 return result
569 567
570 568 def _resolvelines(self, annotateresult, revmap, linelog):
571 569 """(annotateresult) -> [line]. designed for annotatealllines.
572 570 this is probably the most inefficient code in the whole fastannotate
573 571 directory. but we have made a decision that the linelog does not
574 572 store line contents. so getting them requires random accesses to
575 573 the revlog data, since they can be many, it can be very slow.
576 574 """
577 575 # [llrev]
578 576 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
579 577 result = [None] * len(annotateresult)
580 578 # {(rev, linenum): [lineindex]}
581 579 key2idxs = collections.defaultdict(list)
582 580 for i in range(len(result)):
583 581 key2idxs[(revs[i], annotateresult[i][1])].append(i)
584 582 while key2idxs:
585 583 # find an unresolved line and its linelog rev to annotate
586 584 hsh = None
587 585 try:
588 586 for (rev, _linenum), idxs in key2idxs.items():
589 587 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
590 588 continue
591 589 hsh = annotateresult[idxs[0]][0]
592 590 break
593 591 except StopIteration: # no more unresolved lines
594 592 return result
595 593 if hsh is None:
596 594 # the remaining key2idxs are not in main branch, resolving them
597 595 # using the hard way...
598 596 revlines = {}
599 597 for (rev, linenum), idxs in key2idxs.items():
600 598 if rev not in revlines:
601 599 hsh = annotateresult[idxs[0]][0]
602 600 if self.ui.debugflag:
603 601 self.ui.debug(
604 602 b'fastannotate: reading %s line #%d '
605 603 b'to resolve lines %r\n'
606 604 % (short(hsh), linenum, idxs)
607 605 )
608 606 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
609 607 lines = mdiff.splitnewlines(fctx.data())
610 608 revlines[rev] = lines
611 609 for idx in idxs:
612 610 result[idx] = revlines[rev][linenum]
613 611 assert all(x is not None for x in result)
614 612 return result
615 613
616 614 # run the annotate and the lines should match to the file content
617 615 self.ui.debug(
618 616 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
619 617 )
620 618 linelog.annotate(rev)
621 619 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
622 620 annotated = linelog.annotateresult
623 621 lines = mdiff.splitnewlines(fctx.data())
624 622 if len(lines) != len(annotated):
625 623 raise faerror.CorruptedFileError(b'unexpected annotated lines')
626 624 # resolve lines from the annotate result
627 625 for i, line in enumerate(lines):
628 626 k = annotated[i]
629 627 if k in key2idxs:
630 628 for idx in key2idxs[k]:
631 629 result[idx] = line
632 630 del key2idxs[k]
633 631 return result
634 632
635 633 def annotatedirectly(self, f, showpath, showlines):
636 634 """like annotate, but when we know that f is in linelog.
637 635 f can be either a 20-char str (node) or a fctx. this is for perf - in
638 636 the best case, the user provides a node and we don't need to read the
639 637 filelog or construct any filecontext.
640 638 """
641 639 if isinstance(f, bytes):
642 640 hsh = f
643 641 else:
644 642 hsh = f.node()
645 643 llrev = self.revmap.hsh2rev(hsh)
646 644 if not llrev:
647 645 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
648 646 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
649 647 raise faerror.CorruptedFileError(
650 648 b'%s is not in revmap mainbranch' % hex(hsh)
651 649 )
652 650 self.linelog.annotate(llrev)
653 651 result = [
654 652 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
655 653 ]
656 654 return self._refineannotateresult(result, f, showpath, showlines)
657 655
658 656 def _refineannotateresult(self, result, f, showpath, showlines):
659 657 """add the missing path or line contents, they can be expensive.
660 658 f could be either node or fctx.
661 659 """
662 660 if showpath:
663 661 result = self._addpathtoresult(result)
664 662 if showlines:
665 663 if isinstance(f, bytes): # f: node or fctx
666 664 llrev = self.revmap.hsh2rev(f)
667 665 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
668 666 else:
669 667 fctx = f
670 668 lines = mdiff.splitnewlines(fctx.data())
671 669 if len(lines) != len(result): # linelog is probably corrupted
672 670 raise faerror.CorruptedFileError()
673 671 result = (result, lines)
674 672 return result
675 673
676 674 def _appendrev(self, fctx, blocks, bannotated=None):
677 675 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
678 676
679 677 def _diffblocks(self, a, b):
680 678 return mdiff.allblocks(a, b, self.opts.diffopts)
681 679
682 680 @staticmethod
683 681 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
684 682 """append a revision to linelog and revmap"""
685 683
686 684 def getllrev(f):
687 685 """(fctx) -> int"""
688 686 # f should not be a linelog revision
689 687 if isinstance(f, int):
690 688 raise error.ProgrammingError(b'f should not be an int')
691 689 # f is a fctx, allocate linelog rev on demand
692 690 hsh = f.node()
693 691 rev = revmap.hsh2rev(hsh)
694 692 if rev is None:
695 693 rev = revmap.append(hsh, sidebranch=True, path=f.path())
696 694 return rev
697 695
698 696 # append sidebranch revisions to revmap
699 697 siderevs = []
700 698 siderevmap = {} # node: int
701 699 if bannotated is not None:
702 700 for (a1, a2, b1, b2), op in blocks:
703 701 if op != b'=':
704 702 # f could be either linelog rev, or fctx.
705 703 siderevs += [
706 704 f
707 705 for f, l in bannotated[b1:b2]
708 706 if not isinstance(f, int)
709 707 ]
710 708 siderevs = set(siderevs)
711 709 if fctx in siderevs: # mainnode must be appended separately
712 710 siderevs.remove(fctx)
713 711 for f in siderevs:
714 712 siderevmap[f] = getllrev(f)
715 713
716 714 # the changeset in the main branch, could be a merge
717 715 llrev = revmap.append(fctx.node(), path=fctx.path())
718 716 siderevmap[fctx] = llrev
719 717
720 718 for (a1, a2, b1, b2), op in reversed(blocks):
721 719 if op == b'=':
722 720 continue
723 721 if bannotated is None:
724 722 linelog.replacelines(llrev, a1, a2, b1, b2)
725 723 else:
726 724 blines = [
727 725 ((r if isinstance(r, int) else siderevmap[r]), l)
728 726 for r, l in bannotated[b1:b2]
729 727 ]
730 728 linelog.replacelines_vec(llrev, a1, a2, blines)
731 729
732 730 def _addpathtoresult(self, annotateresult, revmap=None):
733 731 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
734 732 if revmap is None:
735 733 revmap = self.revmap
736 734
737 735 def _getpath(nodeid):
738 736 path = self._node2path.get(nodeid)
739 737 if path is None:
740 738 path = revmap.rev2path(revmap.hsh2rev(nodeid))
741 739 self._node2path[nodeid] = path
742 740 return path
743 741
744 742 return [(n, l, _getpath(n)) for n, l in annotateresult]
745 743
746 744 def _checklastmasterhead(self, fctx):
747 745 """check if fctx is the master's head last time, raise if not"""
748 746 if fctx is None:
749 747 llrev = 0
750 748 else:
751 749 llrev = self.revmap.hsh2rev(fctx.node())
752 750 if not llrev:
753 751 raise faerror.CannotReuseError()
754 752 if self.linelog.maxrev != llrev:
755 753 raise faerror.CannotReuseError()
756 754
757 755 @util.propertycache
758 756 def _parentfunc(self):
759 757 """-> (fctx) -> [fctx]"""
760 758 followrename = self.opts.followrename
761 759 followmerge = self.opts.followmerge
762 760
763 761 def parents(f):
764 762 pl = _parents(f, follow=followrename)
765 763 if not followmerge:
766 764 pl = pl[:1]
767 765 return pl
768 766
769 767 return parents
770 768
771 769 @util.propertycache
772 770 def _perfhack(self):
773 771 return self.ui.configbool(b'fastannotate', b'perfhack')
774 772
775 773 def _resolvefctx(self, rev, path=None, **kwds):
776 774 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
777 775
778 776
779 777 def _unlinkpaths(paths):
780 778 """silent, best-effort unlink"""
781 779 for path in paths:
782 780 try:
783 781 util.unlink(path)
784 782 except OSError:
785 783 pass
786 784
787 785
788 786 class pathhelper:
789 787 """helper for getting paths for lockfile, linelog and revmap"""
790 788
791 789 def __init__(self, repo, path, opts=defaultopts):
792 790 # different options use different directories
793 791 self._vfspath = os.path.join(
794 792 b'fastannotate', opts.shortstr, encodedir(path)
795 793 )
796 794 self._repo = repo
797 795
798 796 @property
799 797 def dirname(self):
800 798 return os.path.dirname(self._repo.vfs.join(self._vfspath))
801 799
802 800 @property
803 801 def linelogpath(self):
804 802 return self._repo.vfs.join(self._vfspath + b'.l')
805 803
806 804 def lock(self):
807 805 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
808 806
809 807 @property
810 808 def revmappath(self):
811 809 return self._repo.vfs.join(self._vfspath + b'.m')
812 810
813 811
814 812 @contextlib.contextmanager
815 813 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
816 814 """context needed to perform (fast) annotate on a file
817 815
818 816 an annotatecontext of a single file consists of two structures: the
819 817 linelog and the revmap. this function takes care of locking. only 1
820 818 process is allowed to write that file's linelog and revmap at a time.
821 819
822 820 when something goes wrong, this function will assume the linelog and the
823 821 revmap are in a bad state, and remove them from disk.
824 822
825 823 use this function in the following way:
826 824
827 825 with annotatecontext(...) as actx:
828 826 actx. ....
829 827 """
830 828 helper = pathhelper(repo, path, opts)
831 829 util.makedirs(helper.dirname)
832 830 revmappath = helper.revmappath
833 831 linelogpath = helper.linelogpath
834 832 actx = None
835 833 try:
836 834 with helper.lock():
837 835 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
838 836 if rebuild:
839 837 actx.rebuild()
840 838 yield actx
841 839 except Exception:
842 840 if actx is not None:
843 841 actx.rebuild()
844 842 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
845 843 raise
846 844 finally:
847 845 if actx is not None:
848 846 actx.close()
849 847
850 848
851 849 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
852 850 """like annotatecontext but get the context from a fctx. convenient when
853 851 used in fctx.annotate
854 852 """
855 853 repo = fctx._repo
856 854 path = fctx._path
857 855 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
858 856 follow = True
859 857 aopts = annotateopts(diffopts=diffopts, followrename=follow)
860 858 return annotatecontext(repo, path, aopts, rebuild)
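
fastannotate's ``annotateopts`` populates instance attributes from a class-level defaults dict via ``setattr``, and ``hashdiffopts`` reads options back with ``getattr``; both patterns are plain builtin Python once the proxy imports are gone. A minimal sketch of the defaults-driven initializer (class and option names are illustrative)::

    # Sketch only; `opts` and its option names are made up.
    class opts:
        defaults = {'followrename': True, 'followmerge': True}

        def __init__(self, **kwargs):
            for k, v in self.defaults.items():
                setattr(self, k, kwargs.get(k, v))  # builtin setattr

    o = opts(followmerge=False)
    assert getattr(o, 'followrename') is True  # builtin getattr
    assert o.followmerge is False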
@@ -1,136 +1,135 @@
1 1 # Copyright 2016-present Facebook. All Rights Reserved.
2 2 #
3 3 # support: fastannotate support for hgweb, and filectx
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 from mercurial.pycompat import getattr
10 9 from mercurial import (
11 10 context as hgcontext,
12 11 dagop,
13 12 extensions,
14 13 hgweb,
15 14 patch,
16 15 util,
17 16 )
18 17
19 18 from . import (
20 19 context,
21 20 revmap,
22 21 )
23 22
24 23
25 24 class _lazyfctx:
26 25 """delegates to fctx but do not construct fctx when unnecessary"""
27 26
28 27 def __init__(self, repo, node, path):
29 28 self._node = node
30 29 self._path = path
31 30 self._repo = repo
32 31
33 32 def node(self):
34 33 return self._node
35 34
36 35 def path(self):
37 36 return self._path
38 37
39 38 @util.propertycache
40 39 def _fctx(self):
41 40 return context.resolvefctx(self._repo, self._node, self._path)
42 41
43 42 def __getattr__(self, name):
44 43 return getattr(self._fctx, name)
45 44
46 45
47 46 def _convertoutputs(repo, annotated, contents):
48 47 """convert fastannotate outputs to vanilla annotate format"""
49 48 # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
50 49 # convert to what fctx.annotate returns: [annotateline]
51 50 results = []
52 51 fctxmap = {}
53 52 annotateline = dagop.annotateline
54 53 for i, (hsh, linenum, path) in enumerate(annotated):
55 54 if (hsh, path) not in fctxmap:
56 55 fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
57 56 # linenum: the user wants 1-based, we have 0-based.
58 57 lineno = linenum + 1
59 58 fctx = fctxmap[(hsh, path)]
60 59 line = contents[i]
61 60 results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
62 61 return results
63 62
64 63
65 64 def _getmaster(fctx):
66 65 """(fctx) -> str"""
67 66 return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
68 67
69 68
70 69 def _doannotate(fctx, follow=True, diffopts=None):
71 70 """like the vanilla fctx.annotate, but do it via fastannotate, and make
72 71 the output format compatible with the vanilla fctx.annotate.
73 72 may raise Exception, and always return line numbers.
74 73 """
75 74 master = _getmaster(fctx)
76 75
77 76 with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
78 77 try:
79 78 annotated, contents = ac.annotate(
80 79 fctx.rev(), master=master, showpath=True, showlines=True
81 80 )
82 81 except Exception:
83 82 ac.rebuild() # try rebuild once
84 83 fctx._repo.ui.debug(
85 84 b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
86 85 )
87 86 try:
88 87 annotated, contents = ac.annotate(
89 88 fctx.rev(), master=master, showpath=True, showlines=True
90 89 )
91 90 except Exception:
92 91 raise
93 92
94 93 assert annotated and contents
95 94 return _convertoutputs(fctx._repo, annotated, contents)
96 95
97 96
98 97 def _hgwebannotate(orig, fctx, ui):
99 98 diffopts = patch.difffeatureopts(
100 99 ui, untrusted=True, section=b'annotate', whitespace=True
101 100 )
102 101 return _doannotate(fctx, diffopts=diffopts)
103 102
104 103
105 104 def _fctxannotate(
106 105 orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
107 106 ):
108 107 if skiprevs:
109 108 # skiprevs is not supported yet
110 109 return orig(
111 110 self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
112 111 )
113 112 try:
114 113 return _doannotate(self, follow, diffopts)
115 114 except Exception as ex:
116 115 self._repo.ui.debug(
117 116 b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
118 117 )
119 118 return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
120 119
121 120
122 121 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
123 122 # skipset: a set-like used to test if a fctx needs to be downloaded
124 123 with context.fctxannotatecontext(self, follow, diffopts) as ac:
125 124 skipset = revmap.revmap(ac.revmappath)
126 125 return orig(
127 126 self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
128 127 )
129 128
130 129
131 130 def replacehgwebannotate():
132 131 extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
133 132
134 133
135 134 def replacefctxannotate():
136 135 extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
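
support.py's ``_lazyfctx`` defers building the real filectx until an attribute is actually needed, then forwards the lookup with the builtin ``getattr`` from ``__getattr__``. A hedged sketch of that lazy-delegation idiom (the proxy class and factory here are illustrative)::

    # Sketch only; `lazyproxy` and its factory are made up.
    class lazyproxy:
        def __init__(self, factory):
            self._factory = factory
            self._target = None

        def __getattr__(self, name):
            # only invoked for names not found on the proxy itself
            if self._target is None:
                self._target = self._factory()
            return getattr(self._target, name)  # builtin getattr

    p = lazyproxy(lambda: complex(3, 4))
    assert p.real == 3.0  # builds the target on first access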
@@ -1,2682 +1,2681 @@
1 1 # histedit.py - interactive history editing for mercurial
2 2 #
3 3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """interactive history editing
8 8
9 9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 10 is as follows, assuming the following history::
11 11
12 12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 13 | Add delta
14 14 |
15 15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 16 | Add gamma
17 17 |
18 18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 19 | Add beta
20 20 |
21 21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 22 Add alpha
23 23
24 24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 25 file open in your editor::
26 26
27 27 pick c561b4e977df Add beta
28 28 pick 030b686bedc4 Add gamma
29 29 pick 7c2fd3b9020c Add delta
30 30
31 31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 32 #
33 33 # Commits are listed from least to most recent
34 34 #
35 35 # Commands:
36 36 # p, pick = use commit
37 37 # e, edit = use commit, but allow edits before making new commit
38 38 # f, fold = use commit, but combine it with the one above
39 39 # r, roll = like fold, but discard this commit's description and date
40 40 # d, drop = remove commit from history
41 41 # m, mess = edit commit message without changing commit content
42 42 # b, base = checkout changeset and apply further changesets from there
43 43 #
44 44
45 45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 46 for each revision in your history. For example, if you had meant to add gamma
47 47 before beta, and then wanted to add delta in the same revision as beta, you
48 48 would reorganize the file to look like this::
49 49
50 50 pick 030b686bedc4 Add gamma
51 51 pick c561b4e977df Add beta
52 52 fold 7c2fd3b9020c Add delta
53 53
54 54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 55 #
56 56 # Commits are listed from least to most recent
57 57 #
58 58 # Commands:
59 59 # p, pick = use commit
60 60 # e, edit = use commit, but allow edits before making new commit
61 61 # f, fold = use commit, but combine it with the one above
62 62 # r, roll = like fold, but discard this commit's description and date
63 63 # d, drop = remove commit from history
64 64 # m, mess = edit commit message without changing commit content
65 65 # b, base = checkout changeset and apply further changesets from there
66 66 #
67 67
68 68 At which point you close the editor and ``histedit`` starts working. When you
69 69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 70 those revisions together, offering you a chance to clean up the commit message::
71 71
72 72 Add beta
73 73 ***
74 74 Add delta
75 75
76 76 Edit the commit message to your liking, then close the editor. The date used
77 77 for the commit will be the later of the two commits' dates. For this example,
78 78 let's assume that the commit message was changed to ``Add beta and delta.``
79 79 After histedit has run and had a chance to remove any old or temporary
80 80 revisions it needed, the history looks like this::
81 81
82 82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 83 | Add beta and delta.
84 84 |
85 85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 86 | Add gamma
87 87 |
88 88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 89 Add alpha
90 90
91 91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 92 ones) until after it has completed all the editing operations, so it will
93 93 probably perform several strip operations when it's done. For the above example,
94 94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 95 so you might need to be a little patient. You can choose to keep the original
96 96 revisions by passing the ``--keep`` flag.
97 97
98 98 The ``edit`` operation will drop you back to a command prompt,
99 99 allowing you to edit files freely, or even use ``hg record`` to commit
100 100 some changes as a separate commit. When you're done, any remaining
101 101 uncommitted changes will be committed as well. Then run ``hg
102 102 histedit --continue`` to finish this step. If there are uncommitted
103 103 changes, you'll be prompted for a new commit message, but the default
104 104 commit message will be the original message for the ``edit``-ed
105 105 revision, and the date of the original commit will be preserved.
106 106
107 107 The ``message`` operation will give you a chance to revise a commit
108 108 message without changing the contents. It's a shortcut for doing
109 109 ``edit`` immediately followed by `hg histedit --continue``.
110 110
111 111 If ``histedit`` encounters a conflict when moving a revision (while
112 112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 113 ``edit`` with the difference that it won't prompt you for a commit
114 114 message when done. If you decide at this point that you don't like how
115 115 much work it will be to rearrange history, or that you made a mistake,
116 116 you can use ``hg histedit --abort`` to abandon the new changes you
117 117 have made and return to the state before you attempted to edit your
118 118 history.
119 119
120 120 If we clone the histedit-ed example repository above and add four more
121 121 changes, such that we have the following history::
122 122
123 123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 124 | Add theta
125 125 |
126 126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 127 | Add eta
128 128 |
129 129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 130 | Add zeta
131 131 |
132 132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 133 | Add epsilon
134 134 |
135 135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 136 | Add beta and delta.
137 137 |
138 138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 139 | Add gamma
140 140 |
141 141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 142 Add alpha
143 143
144 144 If you run ``hg histedit --outgoing`` on the clone then it is the same
145 145 as running ``hg histedit 836302820282``. If you plan to push to a
146 146 repository that Mercurial does not detect to be related to the source
147 147 repo, you can add a ``--force`` option.
148 148
149 149 Config
150 150 ------
151 151
152 152 Histedit rule lines are truncated to 80 characters by default. You
153 153 can customize this behavior by setting a different length in your
154 154 configuration file::
155 155
156 156 [histedit]
157 157 linelen = 120 # truncate rule lines at 120 characters
158 158
159 159 The summary of a change can be customized as well::
160 160
161 161 [histedit]
162 162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163 163
164 164 The customized summary should be kept short enough that rule lines
165 165 will fit in the configured line length. See above if that requires
166 166 customization.
167 167
168 168 ``hg histedit`` attempts to automatically choose an appropriate base
169 169 revision to use. To change which base revision is used, define a
170 170 revset in your configuration file::
171 171
172 172 [histedit]
173 173 defaultrev = only(.) & draft()
174 174
175 175 By default each edited revision needs to be present in histedit commands.
176 176 To remove a revision you need to use the ``drop`` operation. You can configure
177 177 the drop to be implicit for missing commits by adding::
178 178
179 179 [histedit]
180 180 dropmissing = True
181 181
182 182 By default, histedit will close the transaction after each action. For
183 183 performance purposes, you can configure histedit to use a single transaction
184 184 across the entire histedit. WARNING: This setting introduces a significant risk
185 185 of losing the work you've done in a histedit if the histedit aborts
186 186 unexpectedly::
187 187
188 188 [histedit]
189 189 singletransaction = True
190 190
191 191 """
192 192
193 193
194 194 # chistedit dependencies that are not available everywhere
195 195 try:
196 196 import fcntl
197 197 import termios
198 198 except ImportError:
199 199 fcntl = None
200 200 termios = None
201 201
202 202 import binascii
203 203 import functools
204 204 import os
205 205 import pickle
206 206 import struct
207 207
208 208 from mercurial.i18n import _
209 209 from mercurial.pycompat import (
210 getattr,
211 210 open,
212 211 )
213 212 from mercurial.node import (
214 213 bin,
215 214 hex,
216 215 short,
217 216 )
218 217 from mercurial import (
219 218 bundle2,
220 219 cmdutil,
221 220 context,
222 221 copies,
223 222 destutil,
224 223 discovery,
225 224 encoding,
226 225 error,
227 226 exchange,
228 227 extensions,
229 228 hg,
230 229 logcmdutil,
231 230 merge as mergemod,
232 231 mergestate as mergestatemod,
233 232 mergeutil,
234 233 obsolete,
235 234 pycompat,
236 235 registrar,
237 236 repair,
238 237 rewriteutil,
239 238 scmutil,
240 239 state as statemod,
241 240 util,
242 241 )
243 242 from mercurial.utils import (
244 243 dateutil,
245 244 stringutil,
246 245 urlutil,
247 246 )
248 247
249 248 cmdtable = {}
250 249 command = registrar.command(cmdtable)
251 250
252 251 configtable = {}
253 252 configitem = registrar.configitem(configtable)
254 253 configitem(
255 254 b'experimental',
256 255 b'histedit.autoverb',
257 256 default=False,
258 257 )
259 258 configitem(
260 259 b'histedit',
261 260 b'defaultrev',
262 261 default=None,
263 262 )
264 263 configitem(
265 264 b'histedit',
266 265 b'dropmissing',
267 266 default=False,
268 267 )
269 268 configitem(
270 269 b'histedit',
271 270 b'linelen',
272 271 default=80,
273 272 )
274 273 configitem(
275 274 b'histedit',
276 275 b'singletransaction',
277 276 default=False,
278 277 )
279 278 configitem(
280 279 b'ui',
281 280 b'interface.histedit',
282 281 default=None,
283 282 )
284 283 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 284 # TODO: Teach the text-based histedit interface to respect this config option
286 285 # before we make it non-experimental.
287 286 configitem(
288 287 b'histedit', b'later-commits-first', default=False, experimental=True
289 288 )
290 289
291 290 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
292 291 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
293 292 # be specifying the version(s) of Mercurial they are tested with, or
294 293 # leave the attribute unspecified.
295 294 testedwith = b'ships-with-hg-core'
296 295
297 296 actiontable = {}
298 297 primaryactions = set()
299 298 secondaryactions = set()
300 299 tertiaryactions = set()
301 300 internalactions = set()
302 301
303 302
304 303 def geteditcomment(ui, first, last):
305 304 """construct the editor comment
306 305 The comment includes::
307 306 - an intro
308 307 - sorted primary commands
309 308 - sorted short commands
310 309 - sorted long commands
311 310 - additional hints
312 311
313 312 Commands are only included once.
314 313 """
315 314 intro = _(
316 315 b"""Edit history between %s and %s
317 316
318 317 Commits are listed from least to most recent
319 318
320 319 You can reorder changesets by reordering the lines
321 320
322 321 Commands:
323 322 """
324 323 )
325 324 actions = []
326 325
327 326 def addverb(v):
328 327 a = actiontable[v]
329 328 lines = a.message.split(b"\n")
330 329 if len(a.verbs):
331 330 v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
332 331 actions.append(b" %s = %s" % (v, lines[0]))
333 332 actions.extend([b' %s'] * (len(lines) - 1))
334 333
335 334 for v in (
336 335 sorted(primaryactions)
337 336 + sorted(secondaryactions)
338 337 + sorted(tertiaryactions)
339 338 ):
340 339 addverb(v)
341 340 actions.append(b'')
342 341
343 342 hints = []
344 343 if ui.configbool(b'histedit', b'dropmissing'):
345 344 hints.append(
346 345 b"Deleting a changeset from the list "
347 346 b"will DISCARD it from the edited history!"
348 347 )
349 348
350 349 lines = (intro % (first, last)).split(b'\n') + actions + hints
351 350
352 351 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
353 352
354 353
355 354 class histeditstate:
356 355 def __init__(self, repo):
357 356 self.repo = repo
358 357 self.actions = None
359 358 self.keep = None
360 359 self.topmost = None
361 360 self.parentctxnode = None
362 361 self.lock = None
363 362 self.wlock = None
364 363 self.backupfile = None
365 364 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
366 365 self.replacements = []
367 366
368 367 def read(self):
369 368 """Load histedit state from disk and set fields appropriately."""
370 369 if not self.stateobj.exists():
371 370 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
372 371
373 372 data = self._read()
374 373
375 374 self.parentctxnode = data[b'parentctxnode']
376 375 actions = parserules(data[b'rules'], self)
377 376 self.actions = actions
378 377 self.keep = data[b'keep']
379 378 self.topmost = data[b'topmost']
380 379 self.replacements = data[b'replacements']
381 380 self.backupfile = data[b'backupfile']
382 381
383 382 def _read(self):
384 383 fp = self.repo.vfs.read(b'histedit-state')
385 384 if fp.startswith(b'v1\n'):
386 385 data = self._load()
387 386 parentctxnode, rules, keep, topmost, replacements, backupfile = data
388 387 else:
389 388 data = pickle.loads(fp)
390 389 parentctxnode, rules, keep, topmost, replacements = data
391 390 backupfile = None
392 391 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
393 392
394 393 return {
395 394 b'parentctxnode': parentctxnode,
396 395 b"rules": rules,
397 396 b"keep": keep,
398 397 b"topmost": topmost,
399 398 b"replacements": replacements,
400 399 b"backupfile": backupfile,
401 400 }
402 401
403 402 def write(self, tr=None):
404 403 if tr:
405 404 tr.addfilegenerator(
406 405 b'histedit-state',
407 406 (b'histedit-state',),
408 407 self._write,
409 408 location=b'plain',
410 409 )
411 410 else:
412 411 with self.repo.vfs(b"histedit-state", b"w") as f:
413 412 self._write(f)
414 413
415 414 def _write(self, fp):
416 415 fp.write(b'v1\n')
417 416 fp.write(b'%s\n' % hex(self.parentctxnode))
418 417 fp.write(b'%s\n' % hex(self.topmost))
419 418 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
420 419 fp.write(b'%d\n' % len(self.actions))
421 420 for action in self.actions:
422 421 fp.write(b'%s\n' % action.tostate())
423 422 fp.write(b'%d\n' % len(self.replacements))
424 423 for replacement in self.replacements:
425 424 fp.write(
426 425 b'%s%s\n'
427 426 % (
428 427 hex(replacement[0]),
429 428 b''.join(hex(r) for r in replacement[1]),
430 429 )
431 430 )
432 431 backupfile = self.backupfile
433 432 if not backupfile:
434 433 backupfile = b''
435 434 fp.write(b'%s\n' % backupfile)
436 435
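# Sketch of the resulting v1 "histedit-state" file (illustrative values;
# each action contributes two lines via tostate(), verb then node hex):
#
#     v1
#     <parentctxnode hex>
#     <topmost hex>
#     True                      (the "keep" flag)
#     2                         (number of actions)
#     pick
#     <40-char node hex>
#     mess
#     <40-char node hex>
#     0                         (number of replacements)
#     <backup file name, possibly empty>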
437 436 def _load(self):
438 437 fp = self.repo.vfs(b'histedit-state', b'r')
439 438 lines = [l[:-1] for l in fp.readlines()]
440 439
441 440 index = 0
442 441 lines[index] # version number
443 442 index += 1
444 443
445 444 parentctxnode = bin(lines[index])
446 445 index += 1
447 446
448 447 topmost = bin(lines[index])
449 448 index += 1
450 449
451 450 keep = lines[index] == b'True'
452 451 index += 1
453 452
454 453 # Rules
455 454 rules = []
456 455 rulelen = int(lines[index])
457 456 index += 1
458 457 for i in range(rulelen):
459 458 ruleaction = lines[index]
460 459 index += 1
461 460 rule = lines[index]
462 461 index += 1
463 462 rules.append((ruleaction, rule))
464 463
465 464 # Replacements
466 465 replacements = []
467 466 replacementlen = int(lines[index])
468 467 index += 1
469 468 for i in range(replacementlen):
470 469 replacement = lines[index]
471 470 original = bin(replacement[:40])
472 471 succ = [
473 472 bin(replacement[i : i + 40])
474 473 for i in range(40, len(replacement), 40)
475 474 ]
476 475 replacements.append((original, succ))
477 476 index += 1
478 477
479 478 backupfile = lines[index]
480 479 index += 1
481 480
482 481 fp.close()
483 482
484 483 return parentctxnode, rules, keep, topmost, replacements, backupfile
485 484
486 485 def clear(self):
487 486 if self.inprogress():
488 487 self.repo.vfs.unlink(b'histedit-state')
489 488
490 489 def inprogress(self):
491 490 return self.repo.vfs.exists(b'histedit-state')
492 491
493 492
494 493 class histeditaction:
495 494 def __init__(self, state, node):
496 495 self.state = state
497 496 self.repo = state.repo
498 497 self.node = node
499 498
500 499 @classmethod
501 500 def fromrule(cls, state, rule):
502 501 """Parses the given rule, returning an instance of the histeditaction."""
503 502 ruleid = rule.strip().split(b' ', 1)[0]
504 503 # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
505 504 # Validate the rule id and get the rulehash
506 505 try:
507 506 rev = bin(ruleid)
508 507 except binascii.Error:
509 508 try:
510 509 _ctx = scmutil.revsingle(state.repo, ruleid)
511 510 rulehash = _ctx.hex()
512 511 rev = bin(rulehash)
513 512 except error.RepoLookupError:
514 513 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
515 514 return cls(state, rev)
516 515
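# For example (illustrative): given the rule b'pick 7c2fd3b9020c Add delta',
# ruleid is the first token, b'7c2fd3b9020c'; a symbolic name in that
# position falls through to scmutil.revsingle() above.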
517 516 def verify(self, prev, expected, seen):
518 517 """Verifies semantic correctness of the rule"""
519 518 repo = self.repo
520 519 ha = hex(self.node)
521 520 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
522 521 if self.node is None:
523 522 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
524 523 self._verifynodeconstraints(prev, expected, seen)
525 524
526 525 def _verifynodeconstraints(self, prev, expected, seen):
527 526 # by default, commands need a node in the edited list
528 527 if self.node not in expected:
529 528 raise error.ParseError(
530 529 _(b'%s "%s" changeset was not a candidate')
531 530 % (self.verb, short(self.node)),
532 531 hint=_(b'only use listed changesets'),
533 532 )
534 533 # and only one command per node
535 534 if self.node in seen:
536 535 raise error.ParseError(
537 536 _(b'duplicated command for changeset %s') % short(self.node)
538 537 )
539 538
540 539 def torule(self):
541 540 """build a histedit rule line for an action
542 541
543 542 by default lines are in the form:
544 543 <verb> <hash> <summary>
545 544 """
546 545 ctx = self.repo[self.node]
547 546 ui = self.repo.ui
548 547 # We don't want color codes in the commit message template, so
549 548 # disable the label() template function while we render it.
550 549 with ui.configoverride(
551 550 {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
552 551 ):
553 552 summary = cmdutil.rendertemplate(
554 553 ctx, ui.config(b'histedit', b'summary-template')
555 554 )
556 555 line = b'%s %s %s' % (self.verb, ctx, stringutil.firstline(summary))
557 556 # trim to 75 columns by default so it's not stupidly wide in my editor
558 557 # (the 5 more are left for verb)
559 558 maxlen = self.repo.ui.configint(b'histedit', b'linelen')
560 559 maxlen = max(maxlen, 22) # avoid truncating hash
561 560 return stringutil.ellipsis(line, maxlen)
562 561
563 562 def tostate(self):
564 563 """Print an action in format used by histedit state files
565 564 (the first line is the verb, the second is the node hash)
566 565 """
567 566 return b"%s\n%s" % (self.verb, hex(self.node))
568 567
569 568 def run(self):
570 569 """Runs the action. The default behavior is simply apply the action's
571 570 rulectx onto the current parentctx."""
572 571 self.applychange()
573 572 self.continuedirty()
574 573 return self.continueclean()
575 574
576 575 def applychange(self):
577 576 """Applies the changes from this action's rulectx onto the current
578 577 parentctx, but does not commit them."""
579 578 repo = self.repo
580 579 rulectx = repo[self.node]
581 580 with repo.ui.silent():
582 581 hg.update(repo, self.state.parentctxnode, quietempty=True)
583 582 stats = applychanges(repo.ui, repo, rulectx, {})
584 583 repo.dirstate.setbranch(rulectx.branch(), repo.currenttransaction())
585 584 if stats.unresolvedcount:
586 585 raise error.InterventionRequired(
587 586 _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
588 587 hint=_(b'hg histedit --continue to resume'),
589 588 )
590 589
591 590 def continuedirty(self):
592 591 """Continues the action when changes have been applied to the working
593 592 copy. The default behavior is to commit the dirty changes."""
594 593 repo = self.repo
595 594 rulectx = repo[self.node]
596 595
597 596 editor = self.commiteditor()
598 597 commit = commitfuncfor(repo, rulectx)
599 598 if repo.ui.configbool(b'rewrite', b'update-timestamp'):
600 599 date = dateutil.makedate()
601 600 else:
602 601 date = rulectx.date()
603 602 commit(
604 603 text=rulectx.description(),
605 604 user=rulectx.user(),
606 605 date=date,
607 606 extra=rulectx.extra(),
608 607 editor=editor,
609 608 )
610 609
611 610 def commiteditor(self):
612 611 """The editor to be used to edit the commit message."""
613 612 return False
614 613
615 614 def continueclean(self):
616 615 """Continues the action when the working copy is clean. The default
617 616 behavior is to accept the current commit as the new version of the
618 617 rulectx."""
619 618 ctx = self.repo[b'.']
620 619 if ctx.node() == self.state.parentctxnode:
621 620 self.repo.ui.warn(
622 621 _(b'%s: skipping changeset (no changes)\n') % short(self.node)
623 622 )
624 623 return ctx, [(self.node, tuple())]
625 624 if ctx.node() == self.node:
626 625 # Nothing changed
627 626 return ctx, []
628 627 return ctx, [(self.node, (ctx.node(),))]
629 628
630 629
631 630 def commitfuncfor(repo, src):
632 631 """Build a commit function for the replacement of <src>
633 632
634 633 This function ensures we apply the same treatment to all changesets.
635 634
636 635 - Add a 'histedit_source' entry in extra.
637 636
638 637 Note that fold has its own separate logic because its handling is a bit
639 638 different and not easily factored out of the fold method.
640 639 """
641 640 phasemin = src.phase()
642 641
643 642 def commitfunc(**kwargs):
644 643 overrides = {(b'phases', b'new-commit'): phasemin}
645 644 with repo.ui.configoverride(overrides, b'histedit'):
646 645 extra = kwargs.get('extra', {}).copy()
647 646 extra[b'histedit_source'] = src.hex()
648 647 kwargs['extra'] = extra
649 648 return repo.commit(**kwargs)
650 649
651 650 return commitfunc
652 651
653 652
654 653 def applychanges(ui, repo, ctx, opts):
655 654 """Merge changeset from ctx (only) in the current working directory"""
656 655 if ctx.p1().node() == repo.dirstate.p1():
657 656 # edits are "in place"; we do not need to make any merge,
658 657 # just apply changes on the parent for editing
659 658 with ui.silent():
660 659 cmdutil.revert(ui, repo, ctx, all=True)
661 660 stats = mergemod.updateresult(0, 0, 0, 0)
662 661 else:
663 662 try:
664 663 # ui.forcemerge is an internal variable, do not document
665 664 repo.ui.setconfig(
666 665 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
667 666 )
668 667 stats = mergemod.graft(
669 668 repo,
670 669 ctx,
671 670 labels=[
672 671 b'already edited',
673 672 b'current change',
674 673 b'parent of current change',
675 674 ],
676 675 )
677 676 finally:
678 677 repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
679 678 return stats
680 679
681 680
682 681 def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
683 682 """collapse the set of revisions from first to last as new one.
684 683
685 684 Expected commit options are:
686 685 - message
687 686 - date
688 687 - username
689 688 Commit message is edited in all cases.
690 689
691 690 This function works in memory."""
692 691 ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
693 692 if not ctxs:
694 693 return None
695 694 for c in ctxs:
696 695 if not c.mutable():
697 696 raise error.ParseError(
698 697 _(b"cannot fold into public change %s") % short(c.node())
699 698 )
700 699 base = firstctx.p1()
701 700
702 701 # commit a new version of the old changeset, including the update
703 702 # collect all files which might be affected
704 703 files = set()
705 704 for ctx in ctxs:
706 705 files.update(ctx.files())
707 706
708 707 # Recompute copies (avoid recording a -> b -> a)
709 708 copied = copies.pathcopies(base, lastctx)
710 709
711 710 # prune files which were reverted by the updates
712 711 files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
713 712 # commit version of these files as defined by head
714 713 headmf = lastctx.manifest()
715 714
716 715 def filectxfn(repo, ctx, path):
717 716 if path in headmf:
718 717 fctx = lastctx[path]
719 718 flags = fctx.flags()
720 719 mctx = context.memfilectx(
721 720 repo,
722 721 ctx,
723 722 fctx.path(),
724 723 fctx.data(),
725 724 islink=b'l' in flags,
726 725 isexec=b'x' in flags,
727 726 copysource=copied.get(path),
728 727 )
729 728 return mctx
730 729 return None
731 730
732 731 if commitopts.get(b'message'):
733 732 message = commitopts[b'message']
734 733 else:
735 734 message = firstctx.description()
736 735 user = commitopts.get(b'user')
737 736 date = commitopts.get(b'date')
738 737 extra = commitopts.get(b'extra')
739 738
740 739 parents = (firstctx.p1().node(), firstctx.p2().node())
741 740 editor = None
742 741 if not skipprompt:
743 742 editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
744 743 new = context.memctx(
745 744 repo,
746 745 parents=parents,
747 746 text=message,
748 747 files=files,
749 748 filectxfn=filectxfn,
750 749 user=user,
751 750 date=date,
752 751 extra=extra,
753 752 editor=editor,
754 753 )
755 754 return repo.commitctx(new)
756 755
757 756
758 757 def _isdirtywc(repo):
759 758 return repo[None].dirty(missing=True)
760 759
761 760
762 761 def abortdirty():
763 762 raise error.StateError(
764 763 _(b'working copy has pending changes'),
765 764 hint=_(
766 765 b'amend, commit, or revert them and run histedit '
767 766 b'--continue, or abort with histedit --abort'
768 767 ),
769 768 )
770 769
771 770
772 771 def action(verbs, message, priority=False, internal=False):
773 772 def wrap(cls):
774 773 assert not priority or not internal
775 774 verb = verbs[0]
776 775 if priority:
777 776 primaryactions.add(verb)
778 777 elif internal:
779 778 internalactions.add(verb)
780 779 elif len(verbs) > 1:
781 780 secondaryactions.add(verb)
782 781 else:
783 782 tertiaryactions.add(verb)
784 783
785 784 cls.verb = verb
786 785 cls.verbs = verbs
787 786 cls.message = message
788 787 for verb in verbs:
789 788 actiontable[verb] = cls
790 789 return cls
791 790
792 791 return wrap
793 792
794 793
795 794 @action([b'pick', b'p'], _(b'use commit'), priority=True)
796 795 class pick(histeditaction):
797 796 def run(self):
798 797 rulectx = self.repo[self.node]
799 798 if rulectx.p1().node() == self.state.parentctxnode:
800 799 self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
801 800 return rulectx, []
802 801
803 802 return super(pick, self).run()
804 803
805 804
806 805 @action(
807 806 [b'edit', b'e'],
808 807 _(b'use commit, but allow edits before making new commit'),
809 808 priority=True,
810 809 )
811 810 class edit(histeditaction):
812 811 def run(self):
813 812 repo = self.repo
814 813 rulectx = repo[self.node]
815 814 hg.update(repo, self.state.parentctxnode, quietempty=True)
816 815 applychanges(repo.ui, repo, rulectx, {})
817 816 hint = _(b'to edit %s, `hg histedit --continue` after making changes')
818 817 raise error.InterventionRequired(
819 818 _(b'Editing (%s), commit as needed now to split the change')
820 819 % short(self.node),
821 820 hint=hint % short(self.node),
822 821 )
823 822
824 823 def commiteditor(self):
825 824 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
826 825
827 826
828 827 @action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
829 828 class fold(histeditaction):
830 829 def verify(self, prev, expected, seen):
831 830 """Verifies semantic correctness of the fold rule"""
832 831 super(fold, self).verify(prev, expected, seen)
833 832 repo = self.repo
834 833 if not prev:
835 834 c = repo[self.node].p1()
836 835 elif prev.verb not in (b'pick', b'base'):
837 836 return
838 837 else:
839 838 c = repo[prev.node]
840 839 if not c.mutable():
841 840 raise error.ParseError(
842 841 _(b"cannot fold into public change %s") % short(c.node())
843 842 )
844 843
845 844 def continuedirty(self):
846 845 repo = self.repo
847 846 rulectx = repo[self.node]
848 847
849 848 commit = commitfuncfor(repo, rulectx)
850 849 commit(
851 850 text=b'fold-temp-revision %s' % short(self.node),
852 851 user=rulectx.user(),
853 852 date=rulectx.date(),
854 853 extra=rulectx.extra(),
855 854 )
856 855
857 856 def continueclean(self):
858 857 repo = self.repo
859 858 ctx = repo[b'.']
860 859 rulectx = repo[self.node]
861 860 parentctxnode = self.state.parentctxnode
862 861 if ctx.node() == parentctxnode:
863 862 repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
864 863 return ctx, [(self.node, (parentctxnode,))]
865 864
866 865 parentctx = repo[parentctxnode]
867 866 newcommits = {
868 867 c.node()
869 868 for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
870 869 }
871 870 if not newcommits:
872 871 repo.ui.warn(
873 872 _(
874 873 b'%s: cannot fold - working copy is not a '
875 874 b'descendant of previous commit %s\n'
876 875 )
877 876 % (short(self.node), short(parentctxnode))
878 877 )
879 878 return ctx, [(self.node, (ctx.node(),))]
880 879
881 880 middlecommits = newcommits.copy()
882 881 middlecommits.discard(ctx.node())
883 882
884 883 return self.finishfold(
885 884 repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
886 885 )
887 886
888 887 def skipprompt(self):
889 888 """Returns true if the rule should skip the message editor.
890 889
891 890 For example, 'fold' wants to show an editor, but 'rollup'
892 891 doesn't want to.
893 892 """
894 893 return False
895 894
896 895 def mergedescs(self):
897 896 """Returns true if the rule should merge messages of multiple changes.
898 897
899 898 This exists mainly so that 'rollup' rules can be a subclass of
900 899 'fold'.
901 900 """
902 901 return True
903 902
904 903 def firstdate(self):
905 904 """Returns true if the rule should preserve the date of the first
906 905 change.
907 906
908 907 This exists mainly so that 'rollup' rules can be a subclass of
909 908 'fold'.
910 909 """
911 910 return False
912 911
913 912 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
914 913 mergemod.update(ctx.p1())
915 914 ### prepare new commit data
916 915 commitopts = {}
917 916 commitopts[b'user'] = ctx.user()
918 917 # commit message
919 918 if not self.mergedescs():
920 919 newmessage = ctx.description()
921 920 else:
922 921 newmessage = (
923 922 b'\n***\n'.join(
924 923 [ctx.description()]
925 924 + [repo[r].description() for r in internalchanges]
926 925 + [oldctx.description()]
927 926 )
928 927 + b'\n'
929 928 )
930 929 commitopts[b'message'] = newmessage
931 930 # date
932 931 if self.firstdate():
933 932 commitopts[b'date'] = ctx.date()
934 933 else:
935 934 commitopts[b'date'] = max(ctx.date(), oldctx.date())
936 935 # if date is to be updated to current
937 936 if ui.configbool(b'rewrite', b'update-timestamp'):
938 937 commitopts[b'date'] = dateutil.makedate()
939 938
940 939 extra = ctx.extra().copy()
941 940 # histedit_source
942 941 # note: ctx is likely a temporary commit but that's the best we can do
943 942 # here. This is sufficient to solve issue3681 anyway.
944 943 extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
945 944 commitopts[b'extra'] = extra
946 945 phasemin = max(ctx.phase(), oldctx.phase())
947 946 overrides = {(b'phases', b'new-commit'): phasemin}
948 947 with repo.ui.configoverride(overrides, b'histedit'):
949 948 n = collapse(
950 949 repo,
951 950 ctx,
952 951 repo[newnode],
953 952 commitopts,
954 953 skipprompt=self.skipprompt(),
955 954 )
956 955 if n is None:
957 956 return ctx, []
958 957 mergemod.update(repo[n])
959 958 replacements = [
960 959 (oldctx.node(), (newnode,)),
961 960 (ctx.node(), (n,)),
962 961 (newnode, (n,)),
963 962 ]
964 963 for ich in internalchanges:
965 964 replacements.append((ich, (n,)))
966 965 return repo[n], replacements
967 966
968 967
969 968 @action(
970 969 [b'base', b'b'],
971 970 _(b'checkout changeset and apply further changesets from there'),
972 971 )
973 972 class base(histeditaction):
974 973 def run(self):
975 974 if self.repo[b'.'].node() != self.node:
976 975 mergemod.clean_update(self.repo[self.node])
977 976 return self.continueclean()
978 977
979 978 def continuedirty(self):
980 979 abortdirty()
981 980
982 981 def continueclean(self):
983 982 basectx = self.repo[b'.']
984 983 return basectx, []
985 984
986 985 def _verifynodeconstraints(self, prev, expected, seen):
987 986 # base can only be used with a node not in the edited set
988 987 if self.node in expected:
989 988 msg = _(b'%s "%s" changeset was an edited list candidate')
990 989 raise error.ParseError(
991 990 msg % (self.verb, short(self.node)),
992 991 hint=_(b'base must only use unlisted changesets'),
993 992 )
994 993
995 994
996 995 @action(
997 996 [b'_multifold'],
998 997 _(
999 998 b"""fold subclass used for when multiple folds happen in a row
1000 999
1001 1000 We only want to fire the editor for the folded message once when
1002 1001 (say) four changes are folded down into a single change. This is
1003 1002 similar to rollup, but we should preserve both messages so that
1004 1003 when the last fold operation runs we can show the user all the
1005 1004 commit messages in their editor.
1006 1005 """
1007 1006 ),
1008 1007 internal=True,
1009 1008 )
1010 1009 class _multifold(fold):
1011 1010 def skipprompt(self):
1012 1011 return True
1013 1012
1014 1013
1015 1014 @action(
1016 1015 [b"roll", b"r"],
1017 1016 _(b"like fold, but discard this commit's description and date"),
1018 1017 )
1019 1018 class rollup(fold):
1020 1019 def mergedescs(self):
1021 1020 return False
1022 1021
1023 1022 def skipprompt(self):
1024 1023 return True
1025 1024
1026 1025 def firstdate(self):
1027 1026 return True
1028 1027
1029 1028
1030 1029 @action([b"drop", b"d"], _(b'remove commit from history'))
1031 1030 class drop(histeditaction):
1032 1031 def run(self):
1033 1032 parentctx = self.repo[self.state.parentctxnode]
1034 1033 return parentctx, [(self.node, tuple())]
1035 1034
1036 1035
1037 1036 @action(
1038 1037 [b"mess", b"m"],
1039 1038 _(b'edit commit message without changing commit content'),
1040 1039 priority=True,
1041 1040 )
1042 1041 class message(histeditaction):
1043 1042 def commiteditor(self):
1044 1043 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
1045 1044
1046 1045
1047 1046 def findoutgoing(ui, repo, remote=None, force=False, opts=None):
1048 1047 """utility function to find the first outgoing changeset
1049 1048
1050 1049 Used by initialization code"""
1051 1050 if opts is None:
1052 1051 opts = {}
1053 1052 path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
1054 1053
1055 1054 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1056 1055
1057 1056 revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
1058 1057 other = hg.peer(repo, opts, path)
1059 1058
1060 1059 if revs:
1061 1060 revs = [repo.lookup(rev) for rev in revs]
1062 1061
1063 1062 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1064 1063 if not outgoing.missing:
1065 1064 raise error.StateError(_(b'no outgoing ancestors'))
1066 1065 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1067 1066 if len(roots) > 1:
1068 1067 msg = _(b'there are ambiguous outgoing revisions')
1069 1068 hint = _(b"see 'hg help histedit' for more detail")
1070 1069 raise error.StateError(msg, hint=hint)
1071 1070 return repo[roots[0]].node()
1072 1071
1073 1072
1074 1073 # Curses Support
1075 1074 try:
1076 1075 import curses
1077 1076 except ImportError:
1078 1077 curses = None
1079 1078
1080 1079 KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
1081 1080 ACTION_LABELS = {
1082 1081 b'fold': b'^fold',
1083 1082 b'roll': b'^roll',
1084 1083 }
1085 1084
1086 1085 COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
1087 1086 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
1088 1087 COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11
1089 1088
1090 1089 E_QUIT, E_HISTEDIT = 1, 2
1091 1090 E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
1092 1091 MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
1093 1092
1094 1093 KEYTABLE = {
1095 1094 b'global': {
1096 1095 b'h': b'next-action',
1097 1096 b'KEY_RIGHT': b'next-action',
1098 1097 b'l': b'prev-action',
1099 1098 b'KEY_LEFT': b'prev-action',
1100 1099 b'q': b'quit',
1101 1100 b'c': b'histedit',
1102 1101 b'C': b'histedit',
1103 1102 b'v': b'showpatch',
1104 1103 b'?': b'help',
1105 1104 },
1106 1105 MODE_RULES: {
1107 1106 b'd': b'action-drop',
1108 1107 b'e': b'action-edit',
1109 1108 b'f': b'action-fold',
1110 1109 b'm': b'action-mess',
1111 1110 b'p': b'action-pick',
1112 1111 b'r': b'action-roll',
1113 1112 b' ': b'select',
1114 1113 b'j': b'down',
1115 1114 b'k': b'up',
1116 1115 b'KEY_DOWN': b'down',
1117 1116 b'KEY_UP': b'up',
1118 1117 b'J': b'move-down',
1119 1118 b'K': b'move-up',
1120 1119 b'KEY_NPAGE': b'move-down',
1121 1120 b'KEY_PPAGE': b'move-up',
1122 1121 b'0': b'goto', # Used for 0..9
1123 1122 },
1124 1123 MODE_PATCH: {
1125 1124 b' ': b'page-down',
1126 1125 b'KEY_NPAGE': b'page-down',
1127 1126 b'KEY_PPAGE': b'page-up',
1128 1127 b'j': b'line-down',
1129 1128 b'k': b'line-up',
1130 1129 b'KEY_DOWN': b'line-down',
1131 1130 b'KEY_UP': b'line-up',
1132 1131 b'J': b'down',
1133 1132 b'K': b'up',
1134 1133 },
1135 1134 MODE_HELP: {},
1136 1135 }
1137 1136
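# Key lookup is two-level: event() consults the table for the current
# mode first, then falls back to the b'global' table.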
1138 1137
1139 1138 def screen_size():
1140 1139 return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b' '))
1141 1140
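# The TIOCGWINSZ ioctl fills a winsize struct whose first two shorts are
# (rows, columns), matching the b'hh' unpack above and the (y, x) order
# of stdscr.getmaxyx() that the result is compared against.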
1142 1141
1143 1142 class histeditrule:
1144 1143 def __init__(self, ui, ctx, pos, action=b'pick'):
1145 1144 self.ui = ui
1146 1145 self.ctx = ctx
1147 1146 self.action = action
1148 1147 self.origpos = pos
1149 1148 self.pos = pos
1150 1149 self.conflicts = []
1151 1150
1152 1151 def __bytes__(self):
1153 1152 # Example display of several histeditrules:
1154 1153 #
1155 1154 # #10 pick 316392:06a16c25c053 add option to skip tests
1156 1155 # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED>
1157 1156 # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
1158 1157 # #13 ^fold 316395:14ce5803f4c3 fix warnings
1159 1158 #
1160 1159 # The carets point to the changeset being folded into ("roll this
1161 1160 # changeset into the changeset above").
1162 1161 return b'%s%s' % (self.prefix, self.desc)
1163 1162
1164 1163 __str__ = encoding.strmethod(__bytes__)
1165 1164
1166 1165 @property
1167 1166 def prefix(self):
1168 1167 # Some actions ('fold' and 'roll') combine a patch with a
1169 1168 # previous one. Add a marker showing which patch they apply
1170 1169 # to.
1171 1170 action = ACTION_LABELS.get(self.action, self.action)
1172 1171
1173 1172 h = self.ctx.hex()[0:12]
1174 1173 r = self.ctx.rev()
1175 1174
1176 1175 return b"#%s %s %d:%s " % (
1177 1176 (b'%d' % self.origpos).ljust(2),
1178 1177 action.ljust(6),
1179 1178 r,
1180 1179 h,
1181 1180 )
1182 1181
1183 1182 @util.propertycache
1184 1183 def desc(self):
1185 1184 summary = cmdutil.rendertemplate(
1186 1185 self.ctx, self.ui.config(b'histedit', b'summary-template')
1187 1186 )
1188 1187 if summary:
1189 1188 return summary
1190 1189 # This is split off from the prefix property so that we can
1191 1190 # separately make the description for 'roll' red (since it
1192 1191 # will get discarded).
1193 1192 return stringutil.firstline(self.ctx.description())
1194 1193
1195 1194 def checkconflicts(self, other):
1196 1195 if other.pos > self.pos and other.origpos <= self.origpos:
1197 1196 if set(other.ctx.files()) & set(self.ctx.files()) != set():
1198 1197 self.conflicts.append(other)
1199 1198 return self.conflicts
1200 1199
1201 1200 if other in self.conflicts:
1202 1201 self.conflicts.remove(other)
1203 1202 return self.conflicts
1204 1203
1205 1204
1206 1205 def makecommands(rules):
1207 1206 """Returns a list of commands consumable by histedit --commands based on
1208 1207 our list of rules"""
1209 1208 commands = []
1210 1209 for rule in rules:
1211 1210 commands.append(b'%s %s\n' % (rule.action, rule.ctx))
1212 1211 return commands
1213 1212
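# The lines produced here are written verbatim to a temporary file and
# handed back to the text-mode histedit via opts[b'commands'] (see
# _chistedit below).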
1214 1213
1215 1214 def addln(win, y, x, line, color=None):
1216 1215 """Add a line to the given window left padding but 100% filled with
1217 1216 whitespace characters, so that the color appears on the whole line"""
1218 1217 maxy, maxx = win.getmaxyx()
1219 1218 length = maxx - 1 - x
1220 1219 line = bytes(line).ljust(length)[:length]
1221 1220 if y < 0:
1222 1221 y = maxy + y
1223 1222 if x < 0:
1224 1223 x = maxx + x
1225 1224 if color:
1226 1225 win.addstr(y, x, line, color)
1227 1226 else:
1228 1227 win.addstr(y, x, line)
1229 1228
1230 1229
1231 1230 def _trunc_head(line, n):
1232 1231 if len(line) <= n:
1233 1232 return line
1234 1233 return b'> ' + line[-(n - 2) :]
1235 1234
1236 1235
1237 1236 def _trunc_tail(line, n):
1238 1237 if len(line) <= n:
1239 1238 return line
1240 1239 return line[: n - 2] + b' >'
1241 1240
1242 1241
1243 1242 class _chistedit_state:
1244 1243 def __init__(
1245 1244 self,
1246 1245 repo,
1247 1246 rules,
1248 1247 stdscr,
1249 1248 ):
1250 1249 self.repo = repo
1251 1250 self.rules = rules
1252 1251 self.stdscr = stdscr
1253 1252 self.later_on_top = repo.ui.configbool(
1254 1253 b'histedit', b'later-commits-first'
1255 1254 )
1256 1255 # The current item in display order, initialized to point to the top
1257 1256 # of the screen.
1258 1257 self.pos = 0
1259 1258 self.selected = None
1260 1259 self.mode = (MODE_INIT, MODE_INIT)
1261 1260 self.page_height = None
1262 1261 self.modes = {
1263 1262 MODE_RULES: {
1264 1263 b'line_offset': 0,
1265 1264 },
1266 1265 MODE_PATCH: {
1267 1266 b'line_offset': 0,
1268 1267 },
1269 1268 }
1270 1269
1271 1270 def render_commit(self, win):
1272 1271 """Renders the commit window that shows the log of the current selected
1273 1272 commit"""
1274 1273 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1275 1274
1276 1275 ctx = rule.ctx
1277 1276 win.box()
1278 1277
1279 1278 maxy, maxx = win.getmaxyx()
1280 1279 length = maxx - 3
1281 1280
1282 1281 line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
1283 1282 win.addstr(1, 1, line[:length])
1284 1283
1285 1284 line = b"user: %s" % ctx.user()
1286 1285 win.addstr(2, 1, line[:length])
1287 1286
1288 1287 bms = self.repo.nodebookmarks(ctx.node())
1289 1288 line = b"bookmark: %s" % b' '.join(bms)
1290 1289 win.addstr(3, 1, line[:length])
1291 1290
1292 1291 line = b"summary: %s" % stringutil.firstline(ctx.description())
1293 1292 win.addstr(4, 1, line[:length])
1294 1293
1295 1294 line = b"files: "
1296 1295 win.addstr(5, 1, line)
1297 1296 fnx = 1 + len(line)
1298 1297 fnmaxx = length - fnx + 1
1299 1298 y = 5
1300 1299 fnmaxn = maxy - (1 + y) - 1
1301 1300 files = ctx.files()
1302 1301 for i, line1 in enumerate(files):
1303 1302 if len(files) > fnmaxn and i == fnmaxn - 1:
1304 1303 win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
1305 1304 y = y + 1
1306 1305 break
1307 1306 win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
1308 1307 y = y + 1
1309 1308
1310 1309 conflicts = rule.conflicts
1311 1310 if len(conflicts) > 0:
1312 1311 conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
1313 1312 conflictstr = b"changed files overlap with %s" % conflictstr
1314 1313 else:
1315 1314 conflictstr = b'no overlap'
1316 1315
1317 1316 win.addstr(y, 1, conflictstr[:length])
1318 1317 win.noutrefresh()
1319 1318
1320 1319 def helplines(self):
1321 1320 if self.mode[0] == MODE_PATCH:
1322 1321 help = b"""\
1323 1322 ?: help, k/up: line up, j/down: line down, v: stop viewing patch
1324 1323 pgup: prev page, space/pgdn: next page, c: commit, q: abort
1325 1324 """
1326 1325 else:
1327 1326 help = b"""\
1328 1327 ?: help, k/up: move up, j/down: move down, space: select, v: view patch
1329 1328 d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
1330 1329 pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
1331 1330 """
1332 1331 if self.later_on_top:
1333 1332 help += b"Newer commits are shown above older commits.\n"
1334 1333 else:
1335 1334 help += b"Older commits are shown above newer commits.\n"
1336 1335 return help.splitlines()
1337 1336
1338 1337 def render_help(self, win):
1339 1338 maxy, maxx = win.getmaxyx()
1340 1339 for y, line in enumerate(self.helplines()):
1341 1340 if y >= maxy:
1342 1341 break
1343 1342 addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
1344 1343 win.noutrefresh()
1345 1344
1346 1345 def layout(self):
1347 1346 maxy, maxx = self.stdscr.getmaxyx()
1348 1347 helplen = len(self.helplines())
1349 1348 mainlen = maxy - helplen - 12
1350 1349 if mainlen < 1:
1351 1350 raise error.Abort(
1352 1351 _(b"terminal dimensions %d by %d too small for curses histedit")
1353 1352 % (maxy, maxx),
1354 1353 hint=_(
1355 1354 b"enlarge your terminal or use --config ui.interface=text"
1356 1355 ),
1357 1356 )
1358 1357 return {
1359 1358 b'commit': (12, maxx),
1360 1359 b'help': (helplen, maxx),
1361 1360 b'main': (mainlen, maxx),
1362 1361 }
1363 1362
1364 1363 def display_pos_to_rule_pos(self, display_pos):
1365 1364 """Converts a position in display order to rule order.
1366 1365
1367 1366 The `display_pos` is the order from the top in display order, not
1368 1367 considering which items are currently visible on the screen. Thus,
1369 1368 `display_pos=0` is the item at the top (possibly after scrolling to
1370 1369 the top).
1371 1370 """
1372 1371 if self.later_on_top:
1373 1372 return len(self.rules) - 1 - display_pos
1374 1373 else:
1375 1374 return display_pos
1376 1375
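# For example (illustrative): with 4 rules and later-commits-first
# enabled, display positions 0,1,2,3 map to rule positions 3,2,1,0;
# with the default ordering the mapping is the identity.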
1377 1376 def render_rules(self, rulesscr):
1378 1377 start = self.modes[MODE_RULES][b'line_offset']
1379 1378
1380 1379 conflicts = [r.ctx for r in self.rules if r.conflicts]
1381 1380 if len(conflicts) > 0:
1382 1381 line = b"potential conflict in %s" % b','.join(
1383 1382 map(pycompat.bytestr, conflicts)
1384 1383 )
1385 1384 addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))
1386 1385
1387 1386 for display_pos in range(start, len(self.rules)):
1388 1387 y = display_pos - start
1389 1388 if y < 0 or y >= self.page_height:
1390 1389 continue
1391 1390 rule_pos = self.display_pos_to_rule_pos(display_pos)
1392 1391 rule = self.rules[rule_pos]
1393 1392 if len(rule.conflicts) > 0:
1394 1393 rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
1395 1394 else:
1396 1395 rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)
1397 1396
1398 1397 if display_pos == self.selected:
1399 1398 rollcolor = COLOR_ROLL_SELECTED
1400 1399 addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
1401 1400 elif display_pos == self.pos:
1402 1401 rollcolor = COLOR_ROLL_CURRENT
1403 1402 addln(
1404 1403 rulesscr,
1405 1404 y,
1406 1405 2,
1407 1406 rule,
1408 1407 curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
1409 1408 )
1410 1409 else:
1411 1410 rollcolor = COLOR_ROLL
1412 1411 addln(rulesscr, y, 2, rule)
1413 1412
1414 1413 if rule.action == b'roll':
1415 1414 rulesscr.addstr(
1416 1415 y,
1417 1416 2 + len(rule.prefix),
1418 1417 rule.desc,
1419 1418 curses.color_pair(rollcolor),
1420 1419 )
1421 1420
1422 1421 rulesscr.noutrefresh()
1423 1422
1424 1423 def render_string(self, win, output, diffcolors=False):
1425 1424 maxy, maxx = win.getmaxyx()
1426 1425 length = min(maxy - 1, len(output))
1427 1426 for y in range(0, length):
1428 1427 line = output[y]
1429 1428 if diffcolors:
1430 1429 if line.startswith(b'+'):
1431 1430 win.addstr(
1432 1431 y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
1433 1432 )
1434 1433 elif line.startswith(b'-'):
1435 1434 win.addstr(
1436 1435 y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
1437 1436 )
1438 1437 elif line.startswith(b'@@ '):
1439 1438 win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
1440 1439 else:
1441 1440 win.addstr(y, 0, line)
1442 1441 else:
1443 1442 win.addstr(y, 0, line)
1444 1443 win.noutrefresh()
1445 1444
1446 1445 def render_patch(self, win):
1447 1446 start = self.modes[MODE_PATCH][b'line_offset']
1448 1447 content = self.modes[MODE_PATCH][b'patchcontents']
1449 1448 self.render_string(win, content[start:], diffcolors=True)
1450 1449
1451 1450 def event(self, ch):
1452 1451 """Change state based on the current character input
1453 1452
1454 1453 Looks up the action bound to the input character for the current mode
1455 1454 (falling back to the global key table) and updates the state accordingly.
1456 1455 """
1457 1456 oldpos = self.pos
1458 1457
1459 1458 if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
1460 1459 return E_RESIZE
1461 1460
1462 1461 lookup_ch = ch
1463 1462 if ch is not None and b'0' <= ch <= b'9':
1464 1463 lookup_ch = b'0'
1465 1464
1466 1465 curmode, prevmode = self.mode
1467 1466 action = KEYTABLE[curmode].get(
1468 1467 lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
1469 1468 )
1470 1469 if action is None:
1471 1470 return
1472 1471 if action in (b'down', b'move-down'):
1473 1472 newpos = min(oldpos + 1, len(self.rules) - 1)
1474 1473 self.move_cursor(oldpos, newpos)
1475 1474 if self.selected is not None or action == b'move-down':
1476 1475 self.swap(oldpos, newpos)
1477 1476 elif action in (b'up', b'move-up'):
1478 1477 newpos = max(0, oldpos - 1)
1479 1478 self.move_cursor(oldpos, newpos)
1480 1479 if self.selected is not None or action == b'move-up':
1481 1480 self.swap(oldpos, newpos)
1482 1481 elif action == b'next-action':
1483 1482 self.cycle_action(oldpos, next=True)
1484 1483 elif action == b'prev-action':
1485 1484 self.cycle_action(oldpos, next=False)
1486 1485 elif action == b'select':
1487 1486 self.selected = oldpos if self.selected is None else None
1488 1487 self.make_selection(self.selected)
1489 1488 elif action == b'goto' and int(ch) < len(self.rules) <= 10:
1490 1489 newrule = next((r for r in self.rules if r.origpos == int(ch)))
1491 1490 self.move_cursor(oldpos, newrule.pos)
1492 1491 if self.selected is not None:
1493 1492 self.swap(oldpos, newrule.pos)
1494 1493 elif action.startswith(b'action-'):
1495 1494 self.change_action(oldpos, action[7:])
1496 1495 elif action == b'showpatch':
1497 1496 self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
1498 1497 elif action == b'help':
1499 1498 self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
1500 1499 elif action == b'quit':
1501 1500 return E_QUIT
1502 1501 elif action == b'histedit':
1503 1502 return E_HISTEDIT
1504 1503 elif action == b'page-down':
1505 1504 return E_PAGEDOWN
1506 1505 elif action == b'page-up':
1507 1506 return E_PAGEUP
1508 1507 elif action == b'line-down':
1509 1508 return E_LINEDOWN
1510 1509 elif action == b'line-up':
1511 1510 return E_LINEUP
1512 1511
1513 1512 def patch_contents(self):
1514 1513 repo = self.repo
1515 1514 rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
1516 1515 displayer = logcmdutil.changesetdisplayer(
1517 1516 repo.ui,
1518 1517 repo,
1519 1518 {b"patch": True, b"template": b"status"},
1520 1519 buffered=True,
1521 1520 )
1522 1521 overrides = {(b'ui', b'verbose'): True}
1523 1522 with repo.ui.configoverride(overrides, source=b'histedit'):
1524 1523 displayer.show(rule.ctx)
1525 1524 displayer.close()
1526 1525 return displayer.hunk[rule.ctx.rev()].splitlines()
1527 1526
1528 1527 def move_cursor(self, oldpos, newpos):
1529 1528 """Change the rule/changeset that the cursor is pointing to, regardless of
1530 1529 current mode (you can switch between patches from the view patch window)."""
1531 1530 self.pos = newpos
1532 1531
1533 1532 mode, _ = self.mode
1534 1533 if mode == MODE_RULES:
1535 1534 # Scroll through the list by updating the view for MODE_RULES, so that
1536 1535 # even if we are not currently viewing the rules, switching back will
1537 1536 # result in the cursor's rule being visible.
1538 1537 modestate = self.modes[MODE_RULES]
1539 1538 if newpos < modestate[b'line_offset']:
1540 1539 modestate[b'line_offset'] = newpos
1541 1540 elif newpos > modestate[b'line_offset'] + self.page_height - 1:
1542 1541 modestate[b'line_offset'] = newpos - self.page_height + 1
1543 1542
1544 1543 # Reset the patch view region to the top of the new patch.
1545 1544 self.modes[MODE_PATCH][b'line_offset'] = 0
1546 1545
1547 1546 def change_mode(self, mode):
1548 1547 curmode, _ = self.mode
1549 1548 self.mode = (mode, curmode)
1550 1549 if mode == MODE_PATCH:
1551 1550 self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()
1552 1551
1553 1552 def make_selection(self, pos):
1554 1553 self.selected = pos
1555 1554
1556 1555 def swap(self, oldpos, newpos):
1557 1556 """Swap two positions and calculate necessary conflicts in
1558 1557 O(|newpos-oldpos|) time"""
1559 1558 old_rule_pos = self.display_pos_to_rule_pos(oldpos)
1560 1559 new_rule_pos = self.display_pos_to_rule_pos(newpos)
1561 1560
1562 1561 rules = self.rules
1563 1562 assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)
1564 1563
1565 1564 rules[old_rule_pos], rules[new_rule_pos] = (
1566 1565 rules[new_rule_pos],
1567 1566 rules[old_rule_pos],
1568 1567 )
1569 1568
1570 1569 # TODO: swap should not know about histeditrule's internals
1571 1570 rules[new_rule_pos].pos = new_rule_pos
1572 1571 rules[old_rule_pos].pos = old_rule_pos
1573 1572
1574 1573 start = min(old_rule_pos, new_rule_pos)
1575 1574 end = max(old_rule_pos, new_rule_pos)
1576 1575 for r in range(start, end + 1):
1577 1576 rules[new_rule_pos].checkconflicts(rules[r])
1578 1577 rules[old_rule_pos].checkconflicts(rules[r])
1579 1578
1580 1579 if self.selected:
1581 1580 self.make_selection(newpos)
1582 1581
1583 1582 def change_action(self, pos, action):
1584 1583 """Change the action state on the given position to the new action"""
1585 1584 assert 0 <= pos < len(self.rules)
1586 1585 self.rules[pos].action = action
1587 1586
1588 1587 def cycle_action(self, pos, next=False):
1589 1588 """Changes the action state the next or the previous action from
1590 1589 the action list"""
1591 1590 assert 0 <= pos < len(self.rules)
1592 1591 current = self.rules[pos].action
1593 1592
1594 1593 assert current in KEY_LIST
1595 1594
1596 1595 index = KEY_LIST.index(current)
1597 1596 if next:
1598 1597 index += 1
1599 1598 else:
1600 1599 index -= 1
1601 1600 self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
1602 1601
1603 1602 def change_view(self, delta, unit):
1604 1603 """Change the region of whatever is being viewed (a patch or the list of
1605 1604 changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
1606 1605 mode, _ = self.mode
1607 1606 if mode != MODE_PATCH:
1608 1607 return
1609 1608 mode_state = self.modes[mode]
1610 1609 num_lines = len(mode_state[b'patchcontents'])
1611 1610 page_height = self.page_height
1612 1611 unit = page_height if unit == b'page' else 1
1613 1612 num_pages = 1 + (num_lines - 1) // page_height
1614 1613 max_offset = (num_pages - 1) * page_height
1615 1614 newline = mode_state[b'line_offset'] + delta * unit
1616 1615 mode_state[b'line_offset'] = max(0, min(max_offset, newline))
1617 1616
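# Illustrative numbers: with 25 patch lines and a 10-line page,
# num_pages = 1 + 24 // 10 = 3 and max_offset = 20, so the offset is
# clamped to 0..20 and the last page may be partially filled.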
1618 1617
1619 1618 def _chisteditmain(repo, rules, stdscr):
1620 1619 try:
1621 1620 curses.use_default_colors()
1622 1621 except curses.error:
1623 1622 pass
1624 1623
1625 1624 # initialize color pattern
1626 1625 curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
1627 1626 curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
1628 1627 curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
1629 1628 curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
1630 1629 curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
1631 1630 curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
1632 1631 curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
1633 1632 curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
1634 1633 curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
1635 1634 curses.init_pair(
1636 1635 COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
1637 1636 )
1638 1637 curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)
1639 1638
1640 1639 # don't display the cursor
1641 1640 try:
1642 1641 curses.curs_set(0)
1643 1642 except curses.error:
1644 1643 pass
1645 1644
1646 1645 def drawvertwin(size, y, x):
1647 1646 win = curses.newwin(size[0], size[1], y, x)
1648 1647 y += size[0]
1649 1648 return win, y, x
1650 1649
1651 1650 state = _chistedit_state(repo, rules, stdscr)
1652 1651
1653 1652 # eventloop
1654 1653 ch = None
1655 1654 stdscr.clear()
1656 1655 stdscr.refresh()
1657 1656 while True:
1658 1657 oldmode, unused = state.mode
1659 1658 if oldmode == MODE_INIT:
1660 1659 state.change_mode(MODE_RULES)
1661 1660 e = state.event(ch)
1662 1661
1663 1662 if e == E_QUIT:
1664 1663 return False
1665 1664 if e == E_HISTEDIT:
1666 1665 return state.rules
1667 1666 else:
1668 1667 if e == E_RESIZE:
1669 1668 size = screen_size()
1670 1669 if size != stdscr.getmaxyx():
1671 1670 curses.resizeterm(*size)
1672 1671
1673 1672 sizes = state.layout()
1674 1673 curmode, unused = state.mode
1675 1674 if curmode != oldmode:
1676 1675 state.page_height = sizes[b'main'][0]
1677 1676 # Adjust the view to fit the current screen size.
1678 1677 state.move_cursor(state.pos, state.pos)
1679 1678
1680 1679 # Pack the windows against the top, each pane spread across the
1681 1680 # full width of the screen.
1682 1681 y, x = (0, 0)
1683 1682 helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
1684 1683 mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
1685 1684 commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)
1686 1685
1687 1686 if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
1688 1687 if e == E_PAGEDOWN:
1689 1688 state.change_view(+1, b'page')
1690 1689 elif e == E_PAGEUP:
1691 1690 state.change_view(-1, b'page')
1692 1691 elif e == E_LINEDOWN:
1693 1692 state.change_view(+1, b'line')
1694 1693 elif e == E_LINEUP:
1695 1694 state.change_view(-1, b'line')
1696 1695
1697 1696 # start rendering
1698 1697 commitwin.erase()
1699 1698 helpwin.erase()
1700 1699 mainwin.erase()
1701 1700 if curmode == MODE_PATCH:
1702 1701 state.render_patch(mainwin)
1703 1702 elif curmode == MODE_HELP:
1704 1703 state.render_string(mainwin, __doc__.strip().splitlines())
1705 1704 else:
1706 1705 state.render_rules(mainwin)
1707 1706 state.render_commit(commitwin)
1708 1707 state.render_help(helpwin)
1709 1708 curses.doupdate()
1710 1709 # done rendering
1711 1710 ch = encoding.strtolocal(stdscr.getkey())
1712 1711
1713 1712
1714 1713 def _chistedit(ui, repo, freeargs, opts):
1715 1714 """interactively edit changeset history via a curses interface
1716 1715
1717 1716     Provides an ncurses interface to histedit. Press ? in chistedit mode
1718 1717     to see extensive help. Requires python-curses to be installed."""
1719 1718
1720 1719 if curses is None:
1721 1720 raise error.Abort(_(b"Python curses library required"))
1722 1721
1723 1722 # disable color
1724 1723 ui._colormode = None
1725 1724
1726 1725 try:
1727 1726 keep = opts.get(b'keep')
1728 1727 revs = opts.get(b'rev', [])[:]
1729 1728 cmdutil.checkunfinished(repo)
1730 1729 cmdutil.bailifchanged(repo)
1731 1730
1732 1731 revs.extend(freeargs)
1733 1732 if not revs:
1734 1733 defaultrev = destutil.desthistedit(ui, repo)
1735 1734 if defaultrev is not None:
1736 1735 revs.append(defaultrev)
1737 1736 if len(revs) != 1:
1738 1737 raise error.InputError(
1739 1738 _(b'histedit requires exactly one ancestor revision')
1740 1739 )
1741 1740
1742 1741 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
1743 1742 if len(rr) != 1:
1744 1743 raise error.InputError(
1745 1744 _(
1746 1745 b'The specified revisions must have '
1747 1746 b'exactly one common root'
1748 1747 )
1749 1748 )
1750 1749 root = rr[0].node()
1751 1750
1752 1751 topmost = repo.dirstate.p1()
1753 1752 revs = between(repo, root, topmost, keep)
1754 1753 if not revs:
1755 1754 raise error.InputError(
1756 1755 _(b'%s is not an ancestor of working directory') % short(root)
1757 1756 )
1758 1757
1759 1758 rules = []
1760 1759 for i, r in enumerate(revs):
1761 1760 rules.append(histeditrule(ui, repo[r], i))
1762 1761 with util.with_lc_ctype():
1763 1762 rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
1764 1763 curses.echo()
1765 1764 curses.endwin()
1766 1765 if rc is False:
1767 1766 ui.write(_(b"histedit aborted\n"))
1768 1767 return 0
1769 1768 if type(rc) is list:
1770 1769 ui.status(_(b"performing changes\n"))
1771 1770 rules = makecommands(rc)
1772 1771 with repo.vfs(b'chistedit', b'w+') as fp:
1773 1772 for r in rules:
1774 1773 fp.write(r)
1775 1774 opts[b'commands'] = fp.name
1776 1775 return _texthistedit(ui, repo, freeargs, opts)
1777 1776 except KeyboardInterrupt:
1778 1777 pass
1779 1778 return -1
1780 1779
1781 1780
1782 1781 @command(
1783 1782 b'histedit',
1784 1783 [
1785 1784 (
1786 1785 b'',
1787 1786 b'commands',
1788 1787 b'',
1789 1788 _(b'read history edits from the specified file'),
1790 1789 _(b'FILE'),
1791 1790 ),
1792 1791 (b'c', b'continue', False, _(b'continue an edit already in progress')),
1793 1792 (b'', b'edit-plan', False, _(b'edit remaining actions list')),
1794 1793 (
1795 1794 b'k',
1796 1795 b'keep',
1797 1796 False,
1798 1797 _(b"don't strip old nodes after edit is complete"),
1799 1798 ),
1800 1799 (b'', b'abort', False, _(b'abort an edit in progress')),
1801 1800 (b'o', b'outgoing', False, _(b'changesets not found in destination')),
1802 1801 (
1803 1802 b'f',
1804 1803 b'force',
1805 1804 False,
1806 1805 _(b'force outgoing even for unrelated repositories'),
1807 1806 ),
1808 1807 (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
1809 1808 ]
1810 1809 + cmdutil.formatteropts,
1811 1810 _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
1812 1811 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
1813 1812 )
1814 1813 def histedit(ui, repo, *freeargs, **opts):
1815 1814 """interactively edit changeset history
1816 1815
1817 1816 This command lets you edit a linear series of changesets (up to
1818 1817 and including the working directory, which should be clean).
1819 1818 You can:
1820 1819
1821 1820 - `pick` to [re]order a changeset
1822 1821
1823 1822     - `drop` to omit a changeset
1824 1823
1825 1824 - `mess` to reword the changeset commit message
1826 1825
1827 1826 - `fold` to combine it with the preceding changeset (using the later date)
1828 1827
1829 1828 - `roll` like fold, but discarding this commit's description and date
1830 1829
1831 1830 - `edit` to edit this changeset (preserving date)
1832 1831
1833 1832     - `base` to check out a changeset and apply further changesets from there
1834 1833
1835 1834 There are a number of ways to select the root changeset:
1836 1835
1837 1836 - Specify ANCESTOR directly
1838 1837
1839 1838 - Use --outgoing -- it will be the first linear changeset not
1840 1839 included in destination. (See :hg:`help config.paths.default-push`)
1841 1840
1842 1841 - Otherwise, the value from the "histedit.defaultrev" config option
1843 1842 is used as a revset to select the base revision when ANCESTOR is not
1844 1843 specified. The first revision returned by the revset is used. By
1845 1844 default, this selects the editable history that is unique to the
1846 1845 ancestry of the working directory.
1847 1846
1848 1847 .. container:: verbose
1849 1848
1850 1849 If you use --outgoing, this command will abort if there are ambiguous
1851 1850 outgoing revisions. For example, if there are multiple branches
1852 1851 containing outgoing revisions.
1853 1852
1854 1853 Use "min(outgoing() and ::.)" or similar revset specification
1855 1854       instead of --outgoing to specify the edit target revision exactly in
1856 1855       such an ambiguous situation. See :hg:`help revsets` for details about
1857 1856 selecting revisions.
1858 1857
1859 1858 .. container:: verbose
1860 1859
1861 1860 Examples:
1862 1861
1863 1862 - A number of changes have been made.
1864 1863 Revision 3 is no longer needed.
1865 1864
1866 1865 Start history editing from revision 3::
1867 1866
1868 1867 hg histedit -r 3
1869 1868
1870 1869 An editor opens, containing the list of revisions,
1871 1870 with specific actions specified::
1872 1871
1873 1872 pick 5339bf82f0ca 3 Zworgle the foobar
1874 1873 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1875 1874 pick 0a9639fcda9d 5 Morgify the cromulancy
1876 1875
1877 1876 Additional information about the possible actions
1878 1877 to take appears below the list of revisions.
1879 1878
1880 1879 To remove revision 3 from the history,
1881 1880 its action (at the beginning of the relevant line)
1882 1881 is changed to 'drop'::
1883 1882
1884 1883 drop 5339bf82f0ca 3 Zworgle the foobar
1885 1884 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1886 1885 pick 0a9639fcda9d 5 Morgify the cromulancy
1887 1886
1888 1887 - A number of changes have been made.
1889 1888         Revisions 2 and 4 need to be swapped.
1890 1889
1891 1890 Start history editing from revision 2::
1892 1891
1893 1892 hg histedit -r 2
1894 1893
1895 1894 An editor opens, containing the list of revisions,
1896 1895 with specific actions specified::
1897 1896
1898 1897 pick 252a1af424ad 2 Blorb a morgwazzle
1899 1898 pick 5339bf82f0ca 3 Zworgle the foobar
1900 1899 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1901 1900
1902 1901       To swap revisions 2 and 4, their lines are swapped
1903 1902 in the editor::
1904 1903
1905 1904 pick 8ef592ce7cc4 4 Bedazzle the zerlog
1906 1905 pick 5339bf82f0ca 3 Zworgle the foobar
1907 1906 pick 252a1af424ad 2 Blorb a morgwazzle
1908 1907
1909 1908 Returns 0 on success, 1 if user intervention is required (not only
1910 1909 for intentional "edit" command, but also for resolving unexpected
1911 1910 conflicts).
1912 1911 """
1913 1912 opts = pycompat.byteskwargs(opts)
1914 1913
1915 1914 # kludge: _chistedit only works for starting an edit, not aborting
1916 1915 # or continuing, so fall back to regular _texthistedit for those
1917 1916 # operations.
1918 1917 if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew:
1919 1918 return _chistedit(ui, repo, freeargs, opts)
1920 1919 return _texthistedit(ui, repo, freeargs, opts)
1921 1920
1922 1921
1923 1922 def _texthistedit(ui, repo, freeargs, opts):
1924 1923 state = histeditstate(repo)
1925 1924 with repo.wlock() as wlock, repo.lock() as lock:
1926 1925 state.wlock = wlock
1927 1926 state.lock = lock
1928 1927 _histedit(ui, repo, state, freeargs, opts)
1929 1928
1930 1929
1931 1930 goalcontinue = b'continue'
1932 1931 goalabort = b'abort'
1933 1932 goaleditplan = b'edit-plan'
1934 1933 goalnew = b'new'
1935 1934
1936 1935
1937 1936 def _getgoal(opts):
1938 1937 if opts.get(b'continue'):
1939 1938 return goalcontinue
1940 1939 if opts.get(b'abort'):
1941 1940 return goalabort
1942 1941 if opts.get(b'edit_plan'):
1943 1942 return goaleditplan
1944 1943 return goalnew
1945 1944
1946 1945
1947 1946 def _readfile(ui, path):
1948 1947 if path == b'-':
1949 1948 with ui.timeblockedsection(b'histedit'):
1950 1949 return ui.fin.read()
1951 1950 else:
1952 1951 with open(path, b'rb') as f:
1953 1952 return f.read()
1954 1953
1955 1954
1956 1955 def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
1957 1956 # TODO only abort if we try to histedit mq patches, not just
1958 1957 # blanket if mq patches are applied somewhere
1959 1958 mq = getattr(repo, 'mq', None)
1960 1959 if mq and mq.applied:
1961 1960 raise error.StateError(_(b'source has mq patches applied'))
1962 1961
1963 1962 # basic argument incompatibility processing
1964 1963 outg = opts.get(b'outgoing')
1965 1964 editplan = opts.get(b'edit_plan')
1966 1965 abort = opts.get(b'abort')
1967 1966 force = opts.get(b'force')
1968 1967 if force and not outg:
1969 1968 raise error.InputError(_(b'--force only allowed with --outgoing'))
1970 1969 if goal == b'continue':
1971 1970 if any((outg, abort, revs, freeargs, rules, editplan)):
1972 1971 raise error.InputError(_(b'no arguments allowed with --continue'))
1973 1972 elif goal == b'abort':
1974 1973 if any((outg, revs, freeargs, rules, editplan)):
1975 1974 raise error.InputError(_(b'no arguments allowed with --abort'))
1976 1975 elif goal == b'edit-plan':
1977 1976 if any((outg, revs, freeargs)):
1978 1977 raise error.InputError(
1979 1978 _(b'only --commands argument allowed with --edit-plan')
1980 1979 )
1981 1980 else:
1982 1981 if outg:
1983 1982 if revs:
1984 1983 raise error.InputError(
1985 1984 _(b'no revisions allowed with --outgoing')
1986 1985 )
1987 1986 if len(freeargs) > 1:
1988 1987 raise error.InputError(
1989 1988 _(b'only one repo argument allowed with --outgoing')
1990 1989 )
1991 1990 else:
1992 1991 revs.extend(freeargs)
1993 1992 if len(revs) == 0:
1994 1993 defaultrev = destutil.desthistedit(ui, repo)
1995 1994 if defaultrev is not None:
1996 1995 revs.append(defaultrev)
1997 1996
1998 1997 if len(revs) != 1:
1999 1998 raise error.InputError(
2000 1999 _(b'histedit requires exactly one ancestor revision')
2001 2000 )
2002 2001
2003 2002
2004 2003 def _histedit(ui, repo, state, freeargs, opts):
2005 2004 fm = ui.formatter(b'histedit', opts)
2006 2005 fm.startitem()
2007 2006 goal = _getgoal(opts)
2008 2007 revs = opts.get(b'rev', [])
2009 2008 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2010 2009 rules = opts.get(b'commands', b'')
2011 2010 state.keep = opts.get(b'keep', False)
2012 2011
2013 2012 _validateargs(ui, repo, freeargs, opts, goal, rules, revs)
2014 2013
2015 2014 hastags = False
2016 2015 if revs:
2017 2016 revs = logcmdutil.revrange(repo, revs)
2018 2017 ctxs = [repo[rev] for rev in revs]
2019 2018 for ctx in ctxs:
2020 2019 tags = [tag for tag in ctx.tags() if tag != b'tip']
2021 2020 if not hastags:
2022 2021 hastags = len(tags)
2023 2022 if hastags:
2024 2023 if ui.promptchoice(
2025 2024 _(
2026 2025 b'warning: tags associated with the given'
2027 2026 b' changeset will be lost after histedit.\n'
2028 2027 b'do you want to continue (yN)? $$ &Yes $$ &No'
2029 2028 ),
2030 2029 default=1,
2031 2030 ):
2032 2031 raise error.CanceledError(_(b'histedit cancelled\n'))
2033 2032 # rebuild state
2034 2033 if goal == goalcontinue:
2035 2034 state.read()
2036 2035 state = bootstrapcontinue(ui, state, opts)
2037 2036 elif goal == goaleditplan:
2038 2037 _edithisteditplan(ui, repo, state, rules)
2039 2038 return
2040 2039 elif goal == goalabort:
2041 2040 _aborthistedit(ui, repo, state, nobackup=nobackup)
2042 2041 return
2043 2042 else:
2044 2043 # goal == goalnew
2045 2044 _newhistedit(ui, repo, state, revs, freeargs, opts)
2046 2045
2047 2046 _continuehistedit(ui, repo, state)
2048 2047 _finishhistedit(ui, repo, state, fm)
2049 2048 fm.end()
2050 2049
2051 2050
2052 2051 def _continuehistedit(ui, repo, state):
2053 2052 """This function runs after either:
2054 2053 - bootstrapcontinue (if the goal is 'continue')
2055 2054 - _newhistedit (if the goal is 'new')
2056 2055 """
2057 2056 # preprocess rules so that we can hide inner folds from the user
2058 2057 # and only show one editor
2059 2058 actions = state.actions[:]
2060 2059 for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
2061 2060 if action.verb == b'fold' and nextact and nextact.verb == b'fold':
2062 2061 state.actions[idx].__class__ = _multifold
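    # For illustration (hypothetical plan): with actions
    # [pick A, fold B, fold C], the action for B becomes _multifold, so
    # only the last fold in the run (C) opens the combined-message editor.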
2063 2062
2064 2063 # Force an initial state file write, so the user can run --abort/continue
2065 2064     # even if there's an exception before the first transaction is serialized.
2066 2065 state.write()
2067 2066
2068 2067 tr = None
2069 2068 # Don't use singletransaction by default since it rolls the entire
2070 2069 # transaction back if an unexpected exception happens (like a
2071 2070 # pretxncommit hook throws, or the user aborts the commit msg editor).
2072 2071 if ui.configbool(b"histedit", b"singletransaction"):
2073 2072 # Don't use a 'with' for the transaction, since actions may close
2074 2073 # and reopen a transaction. For example, if the action executes an
2075 2074 # external process it may choose to commit the transaction first.
2076 2075 tr = repo.transaction(b'histedit')
2077 2076 progress = ui.makeprogress(
2078 2077 _(b"editing"), unit=_(b'changes'), total=len(state.actions)
2079 2078 )
2080 2079 with progress, util.acceptintervention(tr):
2081 2080 while state.actions:
2082 2081 state.write(tr=tr)
2083 2082 actobj = state.actions[0]
2084 2083 progress.increment(item=actobj.torule())
2085 2084 ui.debug(
2086 2085 b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
2087 2086 )
2088 2087 parentctx, replacement_ = actobj.run()
2089 2088 state.parentctxnode = parentctx.node()
2090 2089 state.replacements.extend(replacement_)
2091 2090 state.actions.pop(0)
2092 2091
2093 2092 state.write()
2094 2093
2095 2094
2096 2095 def _finishhistedit(ui, repo, state, fm):
2097 2096 """This action runs when histedit is finishing its session"""
2098 2097 mergemod.update(repo[state.parentctxnode])
2099 2098
2100 2099 mapping, tmpnodes, created, ntm = processreplacement(state)
2101 2100 if mapping:
2102 2101 for prec, succs in mapping.items():
2103 2102 if not succs:
2104 2103 ui.debug(b'histedit: %s is dropped\n' % short(prec))
2105 2104 else:
2106 2105 ui.debug(
2107 2106 b'histedit: %s is replaced by %s\n'
2108 2107 % (short(prec), short(succs[0]))
2109 2108 )
2110 2109 if len(succs) > 1:
2111 2110 m = b'histedit: %s'
2112 2111 for n in succs[1:]:
2113 2112 ui.debug(m % short(n))
2114 2113
2115 2114 if not state.keep:
2116 2115 if mapping:
2117 2116 movetopmostbookmarks(repo, state.topmost, ntm)
2118 2117 # TODO update mq state
2119 2118 else:
2120 2119 mapping = {}
2121 2120
2122 2121 for n in tmpnodes:
2123 2122 if n in repo:
2124 2123 mapping[n] = ()
2125 2124
2126 2125 # remove entries about unknown nodes
2127 2126 has_node = repo.unfiltered().changelog.index.has_node
2128 2127 mapping = {
2129 2128 k: v
2130 2129 for k, v in mapping.items()
2131 2130 if has_node(k) and all(has_node(n) for n in v)
2132 2131 }
2133 2132 scmutil.cleanupnodes(repo, mapping, b'histedit')
2134 2133 hf = fm.hexfunc
2135 2134 fl = fm.formatlist
2136 2135 fd = fm.formatdict
2137 2136 nodechanges = fd(
2138 2137 {
2139 2138 hf(oldn): fl([hf(n) for n in newn], name=b'node')
2140 2139 for oldn, newn in mapping.items()
2141 2140 },
2142 2141 key=b"oldnode",
2143 2142 value=b"newnodes",
2144 2143 )
2145 2144 fm.data(nodechanges=nodechanges)
2146 2145
2147 2146 state.clear()
2148 2147 if os.path.exists(repo.sjoin(b'undo')):
2149 2148 os.unlink(repo.sjoin(b'undo'))
2150 2149 if repo.vfs.exists(b'histedit-last-edit.txt'):
2151 2150 repo.vfs.unlink(b'histedit-last-edit.txt')
2152 2151
2153 2152
2154 2153 def _aborthistedit(ui, repo, state, nobackup=False):
2155 2154 try:
2156 2155 state.read()
2157 2156 __, leafs, tmpnodes, __ = processreplacement(state)
2158 2157 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2159 2158
2160 2159 # Recover our old commits if necessary
2161 2160 if not state.topmost in repo and state.backupfile:
2162 2161 backupfile = repo.vfs.join(state.backupfile)
2163 2162 f = hg.openpath(ui, backupfile)
2164 2163 gen = exchange.readbundle(ui, f, backupfile)
2165 2164 with repo.transaction(b'histedit.abort') as tr:
2166 2165 bundle2.applybundle(
2167 2166 repo,
2168 2167 gen,
2169 2168 tr,
2170 2169 source=b'histedit',
2171 2170 url=b'bundle:' + backupfile,
2172 2171 )
2173 2172
2174 2173 os.remove(backupfile)
2175 2174
2176 2175 # check whether we should update away
2177 2176 if repo.unfiltered().revs(
2178 2177 b'parents() and (%n or %ln::)',
2179 2178 state.parentctxnode,
2180 2179 leafs | tmpnodes,
2181 2180 ):
2182 2181 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2183 2182 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2184 2183 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2185 2184 except Exception:
2186 2185 if state.inprogress():
2187 2186 ui.warn(
2188 2187 _(
2189 2188 b'warning: encountered an exception during histedit '
2190 2189 b'--abort; the repository may not have been completely '
2191 2190 b'cleaned up\n'
2192 2191 )
2193 2192 )
2194 2193 raise
2195 2194 finally:
2196 2195 state.clear()
2197 2196
2198 2197
2199 2198 def hgaborthistedit(ui, repo):
2200 2199 state = histeditstate(repo)
2201 2200 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2202 2201 with repo.wlock() as wlock, repo.lock() as lock:
2203 2202 state.wlock = wlock
2204 2203 state.lock = lock
2205 2204 _aborthistedit(ui, repo, state, nobackup=nobackup)
2206 2205
2207 2206
2208 2207 def _edithisteditplan(ui, repo, state, rules):
2209 2208 state.read()
2210 2209 if not rules:
2211 2210 comment = geteditcomment(
2212 2211 ui, short(state.parentctxnode), short(state.topmost)
2213 2212 )
2214 2213 rules = ruleeditor(repo, ui, state.actions, comment)
2215 2214 else:
2216 2215 rules = _readfile(ui, rules)
2217 2216 actions = parserules(rules, state)
2218 2217 ctxs = [repo[act.node] for act in state.actions if act.node]
2219 2218 warnverifyactions(ui, repo, actions, state, ctxs)
2220 2219 state.actions = actions
2221 2220 state.write()
2222 2221
2223 2222
2224 2223 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2225 2224 outg = opts.get(b'outgoing')
2226 2225 rules = opts.get(b'commands', b'')
2227 2226 force = opts.get(b'force')
2228 2227
2229 2228 cmdutil.checkunfinished(repo)
2230 2229 cmdutil.bailifchanged(repo)
2231 2230
2232 2231 topmost = repo.dirstate.p1()
2233 2232 if outg:
2234 2233 if freeargs:
2235 2234 remote = freeargs[0]
2236 2235 else:
2237 2236 remote = None
2238 2237 root = findoutgoing(ui, repo, remote, force, opts)
2239 2238 else:
2240 2239 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
2241 2240 if len(rr) != 1:
2242 2241 raise error.InputError(
2243 2242 _(
2244 2243 b'The specified revisions must have '
2245 2244 b'exactly one common root'
2246 2245 )
2247 2246 )
2248 2247 root = rr[0].node()
2249 2248
2250 2249 revs = between(repo, root, topmost, state.keep)
2251 2250 if not revs:
2252 2251 raise error.InputError(
2253 2252 _(b'%s is not an ancestor of working directory') % short(root)
2254 2253 )
2255 2254
2256 2255 ctxs = [repo[r] for r in revs]
2257 2256
2258 2257 wctx = repo[None]
2259 2258 # Please don't ask me why `ancestors` is this value. I figured it
2260 2259 # out with print-debugging, not by actually understanding what the
2261 2260 # merge code is doing. :(
2262 2261 ancs = [repo[b'.']]
2263 2262 # Sniff-test to make sure we won't collide with untracked files in
2264 2263 # the working directory. If we don't do this, we can get a
2265 2264 # collision after we've started histedit and backing out gets ugly
2266 2265 # for everyone, especially the user.
2267 2266 for c in [ctxs[0].p1()] + ctxs:
2268 2267 try:
2269 2268 mergemod.calculateupdates(
2270 2269 repo,
2271 2270 wctx,
2272 2271 c,
2273 2272 ancs,
2274 2273 # These parameters were determined by print-debugging
2275 2274 # what happens later on inside histedit.
2276 2275 branchmerge=False,
2277 2276 force=False,
2278 2277 acceptremote=False,
2279 2278 followcopies=False,
2280 2279 )
2281 2280 except error.Abort:
2282 2281 raise error.StateError(
2283 2282 _(
2284 2283 b"untracked files in working directory conflict with files in %s"
2285 2284 )
2286 2285 % c
2287 2286 )
2288 2287
2289 2288 if not rules:
2290 2289 comment = geteditcomment(ui, short(root), short(topmost))
2291 2290 actions = [pick(state, r) for r in revs]
2292 2291 rules = ruleeditor(repo, ui, actions, comment)
2293 2292 else:
2294 2293 rules = _readfile(ui, rules)
2295 2294 actions = parserules(rules, state)
2296 2295 warnverifyactions(ui, repo, actions, state, ctxs)
2297 2296
2298 2297 parentctxnode = repo[root].p1().node()
2299 2298
2300 2299 state.parentctxnode = parentctxnode
2301 2300 state.actions = actions
2302 2301 state.topmost = topmost
2303 2302 state.replacements = []
2304 2303
2305 2304 ui.log(
2306 2305 b"histedit",
2307 2306 b"%d actions to histedit\n",
2308 2307 len(actions),
2309 2308 histedit_num_actions=len(actions),
2310 2309 )
2311 2310
2312 2311 # Create a backup so we can always abort completely.
2313 2312 backupfile = None
2314 2313 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2315 2314 backupfile = repair.backupbundle(
2316 2315 repo, [parentctxnode], [topmost], root, b'histedit'
2317 2316 )
2318 2317 state.backupfile = backupfile
2319 2318
2320 2319
2321 2320 def _getsummary(ctx):
2322 2321 return stringutil.firstline(ctx.description())
2323 2322
2324 2323
2325 2324 def bootstrapcontinue(ui, state, opts):
2326 2325 repo = state.repo
2327 2326
2328 2327 ms = mergestatemod.mergestate.read(repo)
2329 2328 mergeutil.checkunresolved(ms)
2330 2329
2331 2330 if state.actions:
2332 2331 actobj = state.actions.pop(0)
2333 2332
2334 2333 if _isdirtywc(repo):
2335 2334 actobj.continuedirty()
2336 2335 if _isdirtywc(repo):
2337 2336 abortdirty()
2338 2337
2339 2338 parentctx, replacements = actobj.continueclean()
2340 2339
2341 2340 state.parentctxnode = parentctx.node()
2342 2341 state.replacements.extend(replacements)
2343 2342
2344 2343 return state
2345 2344
2346 2345
2347 2346 def between(repo, old, new, keep):
2348 2347     """select and validate the set of revisions to edit
2349 2348
2350 2349 When keep is false, the specified set can't have children."""
2351 2350 revs = repo.revs(b'%n::%n', old, new)
2352 2351 if revs and not keep:
2353 2352 rewriteutil.precheck(repo, revs, b'edit')
2354 2353 if repo.revs(b'(%ld) and merge()', revs):
2355 2354 raise error.StateError(
2356 2355 _(b'cannot edit history that contains merges')
2357 2356 )
2358 2357 return pycompat.maplist(repo.changelog.node, revs)
2359 2358
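# A minimal usage sketch (hypothetical revision numbers): editing everything
# from revision 3 up to the working directory parent is roughly
#
#   nodes = between(repo, repo[3].node(), repo.dirstate.p1(), keep=False)
#
# i.e. the revset '3::.' with the merge and rewrite prechecks applied.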
2360 2359
2361 2360 def ruleeditor(repo, ui, actions, editcomment=b""):
2362 2361 """open an editor to edit rules
2363 2362
2364 2363 rules are in the format [ [act, ctx], ...] like in state.rules
2365 2364 """
2366 2365 if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
2367 2366 newact = util.sortdict()
2368 2367 for act in actions:
2369 2368 ctx = repo[act.node]
2370 2369 summary = _getsummary(ctx)
2371 2370 fword = summary.split(b' ', 1)[0].lower()
2372 2371 added = False
2373 2372
2374 2373 # if it doesn't end with the special character '!' just skip this
2375 2374 if fword.endswith(b'!'):
2376 2375 fword = fword[:-1]
2377 2376 if fword in primaryactions | secondaryactions | tertiaryactions:
2378 2377 act.verb = fword
2379 2378 # get the target summary
2380 2379 tsum = summary[len(fword) + 1 :].lstrip()
2381 2380 # safe but slow: reverse iterate over the actions so we
2382 2381 # don't clash on two commits having the same summary
2383 2382 for na, l in reversed(list(newact.items())):
2384 2383 actx = repo[na.node]
2385 2384 asum = _getsummary(actx)
2386 2385 if asum == tsum:
2387 2386 added = True
2388 2387 l.append(act)
2389 2388 break
2390 2389
2391 2390 if not added:
2392 2391 newact[act] = []
2393 2392
2394 2393 # copy over and flatten the new list
2395 2394 actions = []
2396 2395 for na, l in newact.items():
2397 2396 actions.append(na)
2398 2397 actions += l
2399 2398
2400 2399 rules = b'\n'.join([act.torule() for act in actions])
2401 2400 rules += b'\n\n'
2402 2401 rules += editcomment
2403 2402 rules = ui.edit(
2404 2403 rules,
2405 2404 ui.username(),
2406 2405 {b'prefix': b'histedit'},
2407 2406 repopath=repo.path,
2408 2407 action=b'histedit',
2409 2408 )
2410 2409
2411 2410 # Save edit rules in .hg/histedit-last-edit.txt in case
2412 2411 # the user needs to ask for help after something
2413 2412 # surprising happens.
2414 2413 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2415 2414 f.write(rules)
2416 2415
2417 2416 return rules
2418 2417
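# Autoverb illustration (hypothetical summaries): with
# experimental.histedit.autoverb enabled, a commit summarized as
# "fold! fix typo" is given the 'fold' verb and reordered to directly
# follow the commit whose summary is "fix typo", so the generated plan
# already expresses the intended fold.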
2419 2418
2420 2419 def parserules(rules, state):
2421 2420 """Read the histedit rules string and return list of action objects"""
2422 2421 rules = [
2423 2422 l
2424 2423 for l in (r.strip() for r in rules.splitlines())
2425 2424 if l and not l.startswith(b'#')
2426 2425 ]
2427 2426 actions = []
2428 2427 for r in rules:
2429 2428 if b' ' not in r:
2430 2429 raise error.ParseError(_(b'malformed line "%s"') % r)
2431 2430 verb, rest = r.split(b' ', 1)
2432 2431
2433 2432 if verb not in actiontable:
2434 2433 raise error.ParseError(_(b'unknown action "%s"') % verb)
2435 2434
2436 2435 action = actiontable[verb].fromrule(state, rest)
2437 2436 actions.append(action)
2438 2437 return actions
2439 2438
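# For reference, a rules string in the form parserules() accepts; node
# hashes and summaries below are illustrative, and '#' lines and blank
# lines are skipped:
#
#   pick c561b4e977df 1 add alpha
#   fold 7c2fd3b9020c 2 add beta
#   drop 042ceb04982a 3 add gamma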
2440 2439
2441 2440 def warnverifyactions(ui, repo, actions, state, ctxs):
2442 2441 try:
2443 2442 verifyactions(actions, state, ctxs)
2444 2443 except error.ParseError:
2445 2444 if repo.vfs.exists(b'histedit-last-edit.txt'):
2446 2445 ui.warn(
2447 2446 _(
2448 2447 b'warning: histedit rules saved '
2449 2448 b'to: .hg/histedit-last-edit.txt\n'
2450 2449 )
2451 2450 )
2452 2451 raise
2453 2452
2454 2453
2455 2454 def verifyactions(actions, state, ctxs):
2456 2455 """Verify that there exists exactly one action per given changeset and
2457 2456 other constraints.
2458 2457
2459 2458     Will abort if there are too many or too few rules, a malformed rule,
2460 2459 or a rule on a changeset outside of the user-given range.
2461 2460 """
2462 2461 expected = {c.node() for c in ctxs}
2463 2462 seen = set()
2464 2463 prev = None
2465 2464
2466 2465 if actions and actions[0].verb in [b'roll', b'fold']:
2467 2466 raise error.ParseError(
2468 2467 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2469 2468 )
2470 2469
2471 2470 for action in actions:
2472 2471 action.verify(prev, expected, seen)
2473 2472 prev = action
2474 2473 if action.node is not None:
2475 2474 seen.add(action.node)
2476 2475 missing = sorted(expected - seen) # sort to stabilize output
2477 2476
2478 2477 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2479 2478 if len(actions) == 0:
2480 2479 raise error.ParseError(
2481 2480 _(b'no rules provided'),
2482 2481 hint=_(b'use strip extension to remove commits'),
2483 2482 )
2484 2483
2485 2484 drops = [drop(state, n) for n in missing]
2486 2485         # put them at the beginning so they execute immediately and
2487 2486 # don't show in the edit-plan in the future
2488 2487 actions[:0] = drops
2489 2488 elif missing:
2490 2489 raise error.ParseError(
2491 2490 _(b'missing rules for changeset %s') % short(missing[0]),
2492 2491 hint=_(
2493 2492 b'use "drop %s" to discard, see also: '
2494 2493 b"'hg help -e histedit.config'"
2495 2494 )
2496 2495 % short(missing[0]),
2497 2496 )
2498 2497
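# Behavior sketch (hypothetical config): with histedit.dropmissing=true, a
# plan that omits a changeset gets an implicit leading 'drop' action for it;
# with the default of false, the missing rule raises a ParseError instead.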
2499 2498
2500 2499 def adjustreplacementsfrommarkers(repo, oldreplacements):
2501 2500 """Adjust replacements from obsolescence markers
2502 2501
2503 2502 Replacements structure is originally generated based on
2504 2503 histedit's state and does not account for changes that are
2505 2504 not recorded there. This function fixes that by adding
2506 2505 data read from obsolescence markers"""
2507 2506 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2508 2507 return oldreplacements
2509 2508
2510 2509 unfi = repo.unfiltered()
2511 2510 get_rev = unfi.changelog.index.get_rev
2512 2511 obsstore = repo.obsstore
2513 2512 newreplacements = list(oldreplacements)
2514 2513 oldsuccs = [r[1] for r in oldreplacements]
2515 2514 # successors that have already been added to succstocheck once
2516 2515 seensuccs = set().union(
2517 2516 *oldsuccs
2518 2517 ) # create a set from an iterable of tuples
2519 2518 succstocheck = list(seensuccs)
2520 2519 while succstocheck:
2521 2520 n = succstocheck.pop()
2522 2521 missing = get_rev(n) is None
2523 2522 markers = obsstore.successors.get(n, ())
2524 2523 if missing and not markers:
2525 2524 # dead end, mark it as such
2526 2525 newreplacements.append((n, ()))
2527 2526 for marker in markers:
2528 2527 nsuccs = marker[1]
2529 2528 newreplacements.append((n, nsuccs))
2530 2529 for nsucc in nsuccs:
2531 2530 if nsucc not in seensuccs:
2532 2531 seensuccs.add(nsucc)
2533 2532 succstocheck.append(nsucc)
2534 2533
2535 2534 return newreplacements
2536 2535
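# A small walk-through (hypothetical nodes): if histedit recorded (A, (B,))
# and an obsolescence marker later rewrote B into C, the loop above also
# appends (B, (C,)); if C is neither in the repo nor rewritten further,
# (C, ()) is appended to mark it as a dead end.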
2537 2536
2538 2537 def processreplacement(state):
2539 2538 """process the list of replacements to return
2540 2539
2541 2540 1) the final mapping between original and created nodes
2542 2541     2) the list of temporary nodes created by histedit
2543 2542     3) the list of new commits created by histedit"""
2544 2543 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2545 2544 allsuccs = set()
2546 2545 replaced = set()
2547 2546 fullmapping = {}
2548 2547 # initialize basic set
2549 2548     # fullmapping records all operations recorded in replacements
2550 2549 for rep in replacements:
2551 2550 allsuccs.update(rep[1])
2552 2551 replaced.add(rep[0])
2553 2552 fullmapping.setdefault(rep[0], set()).update(rep[1])
2554 2553 new = allsuccs - replaced
2555 2554 tmpnodes = allsuccs & replaced
2556 2555     # Reduce fullmapping into a direct relation between original nodes
2557 2556     # and the final nodes created during history editing.
2558 2557     # Dropped changesets are replaced by an empty list.
2559 2558 toproceed = set(fullmapping)
2560 2559 final = {}
2561 2560 while toproceed:
2562 2561 for x in list(toproceed):
2563 2562 succs = fullmapping[x]
2564 2563 for s in list(succs):
2565 2564 if s in toproceed:
2566 2565 # non final node with unknown closure
2567 2566 # We can't process this now
2568 2567 break
2569 2568 elif s in final:
2570 2569 # non final node, replace with closure
2571 2570 succs.remove(s)
2572 2571 succs.update(final[s])
2573 2572 else:
2574 2573 final[x] = succs
2575 2574 toproceed.remove(x)
2576 2575 # remove tmpnodes from final mapping
2577 2576 for n in tmpnodes:
2578 2577 del final[n]
2579 2578 # we expect all changes involved in final to exist in the repo
2580 2579 # turn `final` into list (topologically sorted)
2581 2580 get_rev = state.repo.changelog.index.get_rev
2582 2581 for prec, succs in final.items():
2583 2582 final[prec] = sorted(succs, key=get_rev)
2584 2583
2585 2584     # compute the topmost element (necessary for bookmarks)
2586 2585 if new:
2587 2586 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2588 2587 elif not final:
2589 2588         # Nothing was rewritten at all. We won't need `newtopmost`;
2590 2589         # it is the same as `oldtopmost`, and `processreplacement` knows it.
2591 2590 newtopmost = None
2592 2591 else:
2593 2592         # everybody died. The newtopmost is the parent of the root.
2594 2593 r = state.repo.changelog.rev
2595 2594 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2596 2595
2597 2596 return final, tmpnodes, new, newtopmost
2598 2597
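# Worked example (hypothetical nodes): given replacements
# [(A, (B,)), (B, (C,))], node B is both created and replaced, so
# tmpnodes == {B}; the closure reduction collapses the chain to
# final == {A: [C]}, and new == {C} holds the only surviving commit.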
2599 2598
2600 2599 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2601 2600 """Move bookmark from oldtopmost to newly created topmost
2602 2601
2603 2602 This is arguably a feature and we may only want that for the active
2604 2603 bookmark. But the behavior is kept compatible with the old version for now.
2605 2604 """
2606 2605 if not oldtopmost or not newtopmost:
2607 2606 return
2608 2607 oldbmarks = repo.nodebookmarks(oldtopmost)
2609 2608 if oldbmarks:
2610 2609 with repo.lock(), repo.transaction(b'histedit') as tr:
2611 2610 marks = repo._bookmarks
2612 2611 changes = []
2613 2612 for name in oldbmarks:
2614 2613 changes.append((name, newtopmost))
2615 2614 marks.applychanges(repo, tr, changes)
2616 2615
2617 2616
2618 2617 def cleanupnode(ui, repo, nodes, nobackup=False):
2619 2618 """strip a group of nodes from the repository
2620 2619
2621 2620     The set of nodes to strip may contain unknown nodes."""
2622 2621 with repo.lock():
2623 2622 # do not let filtering get in the way of the cleanse
2624 2623         # we should probably get rid of obsolescence markers created during the
2625 2624 # histedit, but we currently do not have such information.
2626 2625 repo = repo.unfiltered()
2627 2626 # Find all nodes that need to be stripped
2628 2627 # (we use %lr instead of %ln to silently ignore unknown items)
2629 2628 has_node = repo.changelog.index.has_node
2630 2629 nodes = sorted(n for n in nodes if has_node(n))
2631 2630 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2632 2631 if roots:
2633 2632 backup = not nobackup
2634 2633 repair.strip(ui, repo, roots, backup=backup)
2635 2634
2636 2635
2637 2636 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2638 2637 if isinstance(nodelist, bytes):
2639 2638 nodelist = [nodelist]
2640 2639 state = histeditstate(repo)
2641 2640 if state.inprogress():
2642 2641 state.read()
2643 2642 histedit_nodes = {
2644 2643 action.node for action in state.actions if action.node
2645 2644 }
2646 2645 common_nodes = histedit_nodes & set(nodelist)
2647 2646 if common_nodes:
2648 2647 raise error.Abort(
2649 2648 _(b"histedit in progress, can't strip %s")
2650 2649 % b', '.join(short(x) for x in common_nodes)
2651 2650 )
2652 2651 return orig(ui, repo, nodelist, *args, **kwargs)
2653 2652
2654 2653
2655 2654 extensions.wrapfunction(repair, 'strip', stripwrapper)
2656 2655
2657 2656
2658 2657 def summaryhook(ui, repo):
2659 2658 state = histeditstate(repo)
2660 2659 if not state.inprogress():
2661 2660 return
2662 2661 state.read()
2663 2662 if state.actions:
2664 2663 # i18n: column positioning for "hg summary"
2665 2664 ui.write(
2666 2665 _(b'hist: %s (histedit --continue)\n')
2667 2666 % (
2668 2667 ui.label(_(b'%d remaining'), b'histedit.remaining')
2669 2668 % len(state.actions)
2670 2669 )
2671 2670 )
2672 2671
2673 2672
2674 2673 def extsetup(ui):
2675 2674 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2676 2675 statemod.addunfinished(
2677 2676 b'histedit',
2678 2677 fname=b'histedit-state',
2679 2678 allowcommit=True,
2680 2679 continueflag=True,
2681 2680 abortfunc=hgaborthistedit,
2682 2681 )
@@ -1,894 +1,893
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a Distributed SCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM; still, it may be useful in very small projects based on single
14 14 # files (like LaTeX packages) that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56
57 57     The more specific you are in your filename patterns, the less
58 58     speed you lose in huge repositories.
59 59
60 60 For [keywordmaps] template mapping and expansion demonstration and
61 61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 62 available templates and filters.
63 63
64 64 Three additional date template filters are provided:
65 65
66 66 :``utcdate``: "2006/09/18 15:13:13"
67 67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69 69
70 70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 71 replaced with customized keywords and templates. Again, run
72 72 :hg:`kwdemo` to control the results of your configuration changes.
73 73
74 74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 75 to avoid storing expanded keywords in the change history.
76 76
77 77 To force expansion after enabling it, or a configuration change, run
78 78 :hg:`kwexpand`.
79 79
80 80 Expansions spanning more than one line and incremental expansions,
81 81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 82 {desc}" expands to the first line of the changeset description.
83 83 '''
84 84
85 85
86 86 import os
87 87 import re
88 88 import weakref
89 89
90 90 from mercurial.i18n import _
91 from mercurial.pycompat import getattr
92 91 from mercurial.hgweb import webcommands
93 92
94 93 from mercurial import (
95 94 cmdutil,
96 95 context,
97 96 dispatch,
98 97 error,
99 98 extensions,
100 99 filelog,
101 100 localrepo,
102 101 logcmdutil,
103 102 match,
104 103 patch,
105 104 pathutil,
106 105 pycompat,
107 106 registrar,
108 107 scmutil,
109 108 templatefilters,
110 109 templateutil,
111 110 util,
112 111 )
113 112 from mercurial.utils import (
114 113 dateutil,
115 114 stringutil,
116 115 )
117 116 from mercurial.dirstateutils import timestamp
118 117
119 118 cmdtable = {}
120 119 command = registrar.command(cmdtable)
121 120 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 121 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 122 # be specifying the version(s) of Mercurial they are tested with, or
124 123 # leave the attribute unspecified.
125 124 testedwith = b'ships-with-hg-core'
126 125
127 126 # hg commands that do not act on keywords
128 127 nokwcommands = (
129 128 b'add addremove annotate bundle export grep incoming init log'
130 129 b' outgoing push tip verify convert email glog'
131 130 )
132 131
133 132 # webcommands that do not act on keywords
134 133 nokwwebcommands = 'annotate changeset rev filediff diff comparison'
135 134
136 135 # hg commands that trigger expansion only when writing to working dir,
137 136 # not when reading filelog, and unexpand when reading from working dir
138 137 restricted = (
139 138 b'merge kwexpand kwshrink record qrecord resolve transplant'
140 139 b' unshelve rebase graft backout histedit fetch'
141 140 )
142 141
143 142 # names of extensions using dorecord
144 143 recordextensions = b'record'
145 144
146 145 colortable = {
147 146 b'kwfiles.enabled': b'green bold',
148 147 b'kwfiles.deleted': b'cyan bold underline',
149 148 b'kwfiles.enabledunknown': b'green',
150 149 b'kwfiles.ignored': b'bold',
151 150 b'kwfiles.ignoredunknown': b'none',
152 151 }
153 152
154 153 templatefilter = registrar.templatefilter()
155 154
156 155 configtable = {}
157 156 configitem = registrar.configitem(configtable)
158 157
159 158 configitem(
160 159 b'keywordset',
161 160 b'svn',
162 161 default=False,
163 162 )
164 163 # date like in cvs' $Date
165 164 @templatefilter(b'utcdate', intype=templateutil.date)
166 165 def utcdate(date):
167 166 """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
168 167 dateformat = b'%Y/%m/%d %H:%M:%S'
169 168 return dateutil.datestr((date[0], 0), dateformat)
170 169
171 170
172 171 # date like in svn's $Date
173 172 @templatefilter(b'svnisodate', intype=templateutil.date)
174 173 def svnisodate(date):
175 174 """Date. Returns a date in this format: "2009-08-18 13:00:13
176 175 +0200 (Tue, 18 Aug 2009)".
177 176 """
178 177 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
179 178
180 179
181 180 # date like in svn's $Id
182 181 @templatefilter(b'svnutcdate', intype=templateutil.date)
183 182 def svnutcdate(date):
184 183 """Date. Returns a UTC-date in this format: "2009-08-18
185 184 11:00:13Z".
186 185 """
187 186 dateformat = b'%Y-%m-%d %H:%M:%SZ'
188 187 return dateutil.datestr((date[0], 0), dateformat)
189 188
190 189
191 190 # make keyword tools accessible
192 191 kwtools = {b'hgcmd': b''}
193 192
194 193
195 194 def _defaultkwmaps(ui):
196 195 '''Returns default keywordmaps according to keywordset configuration.'''
197 196 templates = {
198 197 b'Revision': b'{node|short}',
199 198 b'Author': b'{author|user}',
200 199 }
201 200 kwsets = (
202 201 {
203 202 b'Date': b'{date|utcdate}',
204 203 b'RCSfile': b'{file|basename},v',
205 204 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
206 205 # with hg-keyword
207 206 b'Source': b'{root}/{file},v',
208 207 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
209 208 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
210 209 },
211 210 {
212 211 b'Date': b'{date|svnisodate}',
213 212 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
214 213 b'LastChangedRevision': b'{node|short}',
215 214 b'LastChangedBy': b'{author|user}',
216 215 b'LastChangedDate': b'{date|svnisodate}',
217 216 },
218 217 )
219 218 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
220 219 return templates
221 220
222 221
223 222 def _shrinktext(text, subfunc):
224 223 """Helper for keyword expansion removal in text.
225 224     Depending on subfunc, also returns the number of substitutions."""
226 225 return subfunc(br'$\1$', text)
227 226
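# For illustration (made-up expansion): with the expanded-keyword regex
# kwt.rekwexp from kwtemplater below,
#   _shrinktext(b'$Id: demo.txt,v 1a2b3c4d5e6f $', kwt.rekwexp.sub)
# returns b'$Id$', i.e. the payload between ': ' and ' $' is dropped.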
228 227
229 228 def _preselect(wstatus, changed):
230 229 """Retrieves modified and added files from a working directory state
231 230     and returns the subset of each contained in the given changed files
232 231 retrieved from a change context."""
233 232 modified = [f for f in wstatus.modified if f in changed]
234 233 added = [f for f in wstatus.added if f in changed]
235 234 return modified, added
236 235
237 236
238 237 class kwtemplater:
239 238 """
240 239 Sets up keyword templates, corresponding keyword regex, and
241 240 provides keyword substitution functions.
242 241 """
243 242
244 243 def __init__(self, ui, repo, inc, exc):
245 244 self.ui = ui
246 245 self._repo = weakref.ref(repo)
247 246 self.match = match.match(repo.root, b'', [], inc, exc)
248 247 self.restrict = kwtools[b'hgcmd'] in restricted.split()
249 248 self.postcommit = False
250 249
251 250 kwmaps = self.ui.configitems(b'keywordmaps')
252 251 if kwmaps: # override default templates
253 252 self.templates = dict(kwmaps)
254 253 else:
255 254 self.templates = _defaultkwmaps(self.ui)
256 255
257 256 @property
258 257 def repo(self):
259 258 return self._repo()
260 259
261 260 @util.propertycache
262 261 def escape(self):
263 262 '''Returns bar-separated and escaped keywords.'''
264 263 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
265 264
266 265 @util.propertycache
267 266 def rekw(self):
268 267 '''Returns regex for unexpanded keywords.'''
269 268 return re.compile(br'\$(%s)\$' % self.escape)
270 269
271 270 @util.propertycache
272 271 def rekwexp(self):
273 272 '''Returns regex for expanded keywords.'''
274 273 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
275 274
276 275 def substitute(self, data, path, ctx, subfunc):
277 276 '''Replaces keywords in data with expanded template.'''
278 277
279 278 def kwsub(mobj):
280 279 kw = mobj.group(1)
281 280 ct = logcmdutil.maketemplater(
282 281 self.ui, self.repo, self.templates[kw]
283 282 )
284 283 self.ui.pushbuffer()
285 284 ct.show(ctx, root=self.repo.root, file=path)
286 285 ekw = templatefilters.firstline(self.ui.popbuffer())
287 286 return b'$%s: %s $' % (kw, ekw)
288 287
289 288 return subfunc(kwsub, data)
290 289
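    # For illustration (hypothetical file and changeset): with the
    # cvs-like default maps, substitute(b'$Id$', b'demo.txt', ctx,
    # self.rekw.sub) yields something like
    # b'$Id: demo.txt,v 1a2b3c4d5e6f 2009/08/18 11:00:13 alice $'.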
291 290 def linkctx(self, path, fileid):
292 291 '''Similar to filelog.linkrev, but returns a changectx.'''
293 292 return self.repo.filectx(path, fileid=fileid).changectx()
294 293
295 294 def expand(self, path, node, data):
296 295 '''Returns data with keywords expanded.'''
297 296 if (
298 297 not self.restrict
299 298 and self.match(path)
300 299 and not stringutil.binary(data)
301 300 ):
302 301 ctx = self.linkctx(path, node)
303 302 return self.substitute(data, path, ctx, self.rekw.sub)
304 303 return data
305 304
306 305 def iskwfile(self, cand, ctx):
307 306 """Returns subset of candidates which are configured for keyword
308 307 expansion but are not symbolic links."""
309 308 return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]
310 309
311 310 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
312 311 '''Overwrites selected files expanding/shrinking keywords.'''
313 312 if self.restrict or lookup or self.postcommit: # exclude kw_copy
314 313 candidates = self.iskwfile(candidates, ctx)
315 314 if not candidates:
316 315 return
317 316 kwcmd = self.restrict and lookup # kwexpand/kwshrink
318 317 if self.restrict or expand and lookup:
319 318 mf = ctx.manifest()
320 319 if self.restrict or rekw:
321 320 re_kw = self.rekw
322 321 else:
323 322 re_kw = self.rekwexp
324 323 if expand:
325 324 msg = _(b'overwriting %s expanding keywords\n')
326 325 else:
327 326 msg = _(b'overwriting %s shrinking keywords\n')
328 327 wctx = self.repo[None]
329 328 for f in candidates:
330 329 if self.restrict:
331 330 data = self.repo.file(f).read(mf[f])
332 331 else:
333 332 data = self.repo.wread(f)
334 333 if stringutil.binary(data):
335 334 continue
336 335 if expand:
337 336 parents = ctx.parents()
338 337 if lookup:
339 338 ctx = self.linkctx(f, mf[f])
340 339 elif self.restrict and len(parents) > 1:
341 340 # merge commit
342 341                     # in case of conflict, f is in modified state during
343 342                     # merge, even if f does not differ from f in the parent
344 343 for p in parents:
345 344 if f in p and not p[f].cmp(ctx[f]):
346 345 ctx = p[f].changectx()
347 346 break
348 347 data, found = self.substitute(data, f, ctx, re_kw.subn)
349 348 elif self.restrict:
350 349 found = re_kw.search(data)
351 350 else:
352 351 data, found = _shrinktext(data, re_kw.subn)
353 352 if found:
354 353 self.ui.note(msg % f)
355 354 fp = self.repo.wvfs(f, b"wb", atomictemp=True)
356 355 fp.write(data)
357 356 fp.close()
358 357 if kwcmd:
359 358 s = wctx[f].lstat()
360 359 mode = s.st_mode
361 360 size = s.st_size
362 361 mtime = timestamp.mtime_of(s)
363 362 cache_data = (mode, size, mtime)
364 363 self.repo.dirstate.set_clean(f, cache_data)
365 364 elif self.postcommit:
366 365 self.repo.dirstate.update_file_p1(f, p1_tracked=True)
367 366
368 367 def shrink(self, fname, text):
369 368 '''Returns text with all keyword substitutions removed.'''
370 369 if self.match(fname) and not stringutil.binary(text):
371 370 return _shrinktext(text, self.rekwexp.sub)
372 371 return text
373 372
374 373 def shrinklines(self, fname, lines):
375 374 '''Returns lines with keyword substitutions removed.'''
376 375 if self.match(fname):
377 376 text = b''.join(lines)
378 377 if not stringutil.binary(text):
379 378 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
380 379 return lines
381 380
382 381 def wread(self, fname, data):
383 382         """If in restricted mode, returns data read from wdir with
384 383 keyword substitutions removed."""
385 384 if self.restrict:
386 385 return self.shrink(fname, data)
387 386 return data
388 387
389 388
390 389 class kwfilelog(filelog.filelog):
391 390 """
392 391 Subclass of filelog to hook into its read, add, cmp methods.
393 392 Keywords are "stored" unexpanded, and processed on reading.
394 393 """
395 394
396 395 def __init__(self, opener, kwt, path):
397 396 super(kwfilelog, self).__init__(opener, path)
398 397 self.kwt = kwt
399 398 self.path = path
400 399
401 400 def read(self, node):
402 401 '''Expands keywords when reading filelog.'''
403 402 data = super(kwfilelog, self).read(node)
404 403 if self.renamed(node):
405 404 return data
406 405 return self.kwt.expand(self.path, node, data)
407 406
408 407 def add(self, text, meta, tr, link, p1=None, p2=None):
409 408 '''Removes keyword substitutions when adding to filelog.'''
410 409 text = self.kwt.shrink(self.path, text)
411 410 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
412 411
413 412 def cmp(self, node, text):
414 413 '''Removes keyword substitutions for comparison.'''
415 414 text = self.kwt.shrink(self.path, text)
416 415 return super(kwfilelog, self).cmp(node, text)
417 416
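# Net effect of kwfilelog (illustrative): file text is always stored in
# shrunk form, e.g. b'$Id$', while read() re-expands it on the way out,
# so expanded keywords never enter the change history.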
418 417
419 418 def _status(ui, repo, wctx, kwt, *pats, **opts):
420 419 """Bails out if [keyword] configuration is not active.
421 420 Returns status of working directory."""
422 421 if kwt:
423 422 return repo.status(
424 423 match=scmutil.match(wctx, pats, pycompat.byteskwargs(opts)),
425 424 clean=True,
426 425 unknown=opts.get('unknown') or opts.get('all'),
427 426 )
428 427 if ui.configitems(b'keyword'):
429 428 raise error.Abort(_(b'[keyword] patterns cannot match'))
430 429 raise error.Abort(_(b'no [keyword] patterns configured'))
431 430
432 431
433 432 def _kwfwrite(ui, repo, expand, *pats, **opts):
434 433 '''Selects files and passes them to kwtemplater.overwrite.'''
435 434 wctx = repo[None]
436 435 if len(wctx.parents()) > 1:
437 436 raise error.Abort(_(b'outstanding uncommitted merge'))
438 437 kwt = getattr(repo, '_keywordkwt', None)
439 438 with repo.wlock(), repo.dirstate.changing_files(repo):
440 439 status = _status(ui, repo, wctx, kwt, *pats, **opts)
441 440 if status.modified or status.added or status.removed or status.deleted:
442 441 raise error.Abort(_(b'outstanding uncommitted changes'))
443 442 kwt.overwrite(wctx, status.clean, True, expand)
444 443
445 444
446 445 @command(
447 446 b'kwdemo',
448 447 [
449 448 (b'd', b'default', None, _(b'show default keyword template maps')),
450 449 (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
451 450 ],
452 451 _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
453 452 optionalrepo=True,
454 453 )
455 454 def demo(ui, repo, *args, **opts):
456 455 """print [keywordmaps] configuration and an expansion example
457 456
458 457 Show current, custom, or default keyword template maps and their
459 458 expansions.
460 459
461 460 Extend the current configuration by specifying maps as arguments
462 461 and using -f/--rcfile to source an external hgrc file.
463 462
464 463 Use -d/--default to disable current configuration.
465 464
466 465 See :hg:`help templates` for information on templates and filters.
467 466 """
468 467
469 468 def demoitems(section, items):
470 469 ui.write(b'[%s]\n' % section)
471 470 for k, v in sorted(items):
472 471 if isinstance(v, bool):
473 472 v = stringutil.pprint(v)
474 473 ui.write(b'%s = %s\n' % (k, v))
475 474
476 475 fn = b'demo.txt'
477 476 tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
478 477 ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
479 478 if repo is None:
480 479 baseui = ui
481 480 else:
482 481 baseui = repo.baseui
483 482 repo = localrepo.instance(baseui, tmpdir, create=True)
484 483 ui.setconfig(b'keyword', fn, b'', b'keyword')
485 484 svn = ui.configbool(b'keywordset', b'svn')
486 485 # explicitly set keywordset for demo output
487 486 ui.setconfig(b'keywordset', b'svn', svn, b'keyword')
488 487
489 488 uikwmaps = ui.configitems(b'keywordmaps')
490 489 if args or opts.get('rcfile'):
491 490 ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
492 491 if uikwmaps:
493 492 ui.status(_(b'\textending current template maps\n'))
494 493 if opts.get('default') or not uikwmaps:
495 494 if svn:
496 495 ui.status(_(b'\toverriding default svn keywordset\n'))
497 496 else:
498 497 ui.status(_(b'\toverriding default cvs keywordset\n'))
499 498 if opts.get('rcfile'):
500 499 ui.readconfig(opts.get(b'rcfile'))
501 500 if args:
502 501 # simulate hgrc parsing
503 502 rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
504 503 repo.vfs.write(b'hgrc', rcmaps)
505 504 ui.readconfig(repo.vfs.join(b'hgrc'))
506 505 kwmaps = dict(ui.configitems(b'keywordmaps'))
507 506 elif opts.get('default'):
508 507 if svn:
509 508 ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
510 509 else:
511 510 ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
512 511 kwmaps = _defaultkwmaps(ui)
513 512 if uikwmaps:
514 513 ui.status(_(b'\tdisabling current template maps\n'))
515 514 for k, v in kwmaps.items():
516 515 ui.setconfig(b'keywordmaps', k, v, b'keyword')
517 516 else:
518 517 ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
519 518 if uikwmaps:
520 519 kwmaps = dict(uikwmaps)
521 520 else:
522 521 kwmaps = _defaultkwmaps(ui)
523 522
524 523 uisetup(ui)
525 524 reposetup(ui, repo)
526 525 ui.writenoi18n(b'[extensions]\nkeyword =\n')
527 526 demoitems(b'keyword', ui.configitems(b'keyword'))
528 527 demoitems(b'keywordset', ui.configitems(b'keywordset'))
529 528 demoitems(b'keywordmaps', kwmaps.items())
530 529 keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
531 530 repo.wvfs.write(fn, keywords)
532 531 with repo.wlock():
533 532 with repo.dirstate.changing_files(repo):
534 533 repo[None].add([fn])
535 534 ui.note(_(b'\nkeywords written to %s:\n') % fn)
536 535 ui.note(keywords)
537 536 repo.dirstate.setbranch(b'demobranch', repo.currenttransaction())
538 537 for name, cmd in ui.configitems(b'hooks'):
539 538 if name.split(b'.', 1)[0].find(b'commit') > -1:
540 539 repo.ui.setconfig(b'hooks', name, b'', b'keyword')
541 540 msg = _(b'hg keyword configuration and expansion example')
542 541 ui.note((b"hg ci -m '%s'\n" % msg))
543 542 repo.commit(text=msg)
544 543 ui.status(_(b'\n\tkeywords expanded\n'))
545 544 ui.write(repo.wread(fn))
546 545 repo.wvfs.rmtree(repo.root)
547 546
548 547
549 548 @command(
550 549 b'kwexpand',
551 550 cmdutil.walkopts,
552 551 _(b'hg kwexpand [OPTION]... [FILE]...'),
553 552 inferrepo=True,
554 553 )
555 554 def expand(ui, repo, *pats, **opts):
556 555 """expand keywords in the working directory
557 556
558 557 Run after (re)enabling keyword expansion.
559 558
560 559 kwexpand refuses to run if given files contain local changes.
561 560 """
562 561 # 3rd argument sets expansion to True
563 562 _kwfwrite(ui, repo, True, *pats, **opts)
564 563
565 564
566 565 @command(
567 566 b'kwfiles',
568 567 [
569 568 (b'A', b'all', None, _(b'show keyword status flags of all files')),
570 569 (b'i', b'ignore', None, _(b'show files excluded from expansion')),
571 570 (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
572 571 ]
573 572 + cmdutil.walkopts,
574 573 _(b'hg kwfiles [OPTION]... [FILE]...'),
575 574 inferrepo=True,
576 575 )
577 576 def files(ui, repo, *pats, **opts):
578 577 """show files configured for keyword expansion
579 578
580 579 List which files in the working directory are matched by the
581 580 [keyword] configuration patterns.
582 581
583 582 Useful to prevent inadvertent keyword expansion and to speed up
584 583 execution by including only files that are actual candidates for
585 584 expansion.
586 585
587 586 See :hg:`help keyword` on how to construct patterns both for
588 587 inclusion and exclusion of files.
589 588
590 589 With -A/--all and -v/--verbose the codes used to show the status
591 590 of files are::
592 591
593 592 K = keyword expansion candidate
594 593 k = keyword expansion candidate (not tracked)
595 594 I = ignored
596 595 i = ignored (not tracked)
597 596 """
598 597 kwt = getattr(repo, '_keywordkwt', None)
599 598 wctx = repo[None]
600 599 status = _status(ui, repo, wctx, kwt, *pats, **opts)
601 600 if pats:
602 601 cwd = repo.getcwd()
603 602 else:
604 603 cwd = b''
605 604 files = []
606 605
607 606 if not opts.get('unknown') or opts.get('all'):
608 607 files = sorted(status.modified + status.added + status.clean)
609 608 kwfiles = kwt.iskwfile(files, wctx)
610 609 kwdeleted = kwt.iskwfile(status.deleted, wctx)
611 610 kwunknown = kwt.iskwfile(status.unknown, wctx)
612 611 if not opts.get('ignore') or opts.get('all'):
613 612 showfiles = kwfiles, kwdeleted, kwunknown
614 613 else:
615 614 showfiles = [], [], []
616 615 if opts.get('all') or opts.get('ignore'):
617 616 showfiles += (
618 617 [f for f in files if f not in kwfiles],
619 618 [f for f in status.unknown if f not in kwunknown],
620 619 )
621 620 kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
622 621 kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
623 622 fm = ui.formatter(b'kwfiles', pycompat.byteskwargs(opts))
624 623 fmt = b'%.0s%s\n'
625 624 if opts.get('all') or ui.verbose:
626 625 fmt = b'%s %s\n'
627 626 for kwstate, char, filenames in kwstates:
628 627 label = b'kwfiles.' + kwstate
629 628 for f in filenames:
630 629 fm.startitem()
631 630 fm.data(kwstatus=char, path=f)
632 631 fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
633 632 fm.end()
634 633
635 634
636 635 @command(
637 636 b'kwshrink',
638 637 cmdutil.walkopts,
639 638 _(b'hg kwshrink [OPTION]... [FILE]...'),
640 639 inferrepo=True,
641 640 )
642 641 def shrink(ui, repo, *pats, **opts):
643 642 """revert expanded keywords in the working directory
644 643
645 644 Must be run before changing/disabling active keywords.
646 645
647 646 kwshrink refuses to run if given files contain local changes.
648 647 """
649 648 # 3rd argument sets expansion to False
650 649 _kwfwrite(ui, repo, False, *pats, **opts)
651 650
652 651
653 652 # monkeypatches
654 653
655 654
656 655 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
657 656 """Monkeypatch/wrap patch.patchfile.__init__ to avoid
658 657 rejects or conflicts due to expanded keywords in working dir."""
659 658 orig(self, ui, gp, backend, store, eolmode)
660 659 kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
661 660 if kwt:
662 661 # shrink keywords read from working dir
663 662 self.lines = kwt.shrinklines(self.fname, self.lines)
664 663
665 664
666 665 def kwdiff(orig, repo, *args, **kwargs):
667 666 '''Monkeypatch patch.diff to avoid expansion.'''
668 667 kwt = getattr(repo, '_keywordkwt', None)
669 668 if kwt:
670 669 restrict = kwt.restrict
671 670 kwt.restrict = True
672 671 try:
673 672 for chunk in orig(repo, *args, **kwargs):
674 673 yield chunk
675 674 finally:
676 675 if kwt:
677 676 kwt.restrict = restrict
678 677
679 678
680 679 def kwweb_skip(orig, web):
681 680 '''Wraps webcommands.x turning off keyword expansion.'''
682 681 kwt = getattr(web.repo, '_keywordkwt', None)
683 682 if kwt:
684 683 origmatch = kwt.match
685 684 kwt.match = util.never
686 685 try:
687 686 for chunk in orig(web):
688 687 yield chunk
689 688 finally:
690 689 if kwt:
691 690 kwt.match = origmatch
692 691
693 692
694 693 def kw_amend(orig, ui, repo, old, extra, pats, opts):
695 694 '''Wraps cmdutil.amend expanding keywords after amend.'''
696 695 kwt = getattr(repo, '_keywordkwt', None)
697 696 if kwt is None:
698 697 return orig(ui, repo, old, extra, pats, opts)
699 698 with repo.wlock(), repo.dirstate.changing_parents(repo):
700 699 kwt.postcommit = True
701 700 newid = orig(ui, repo, old, extra, pats, opts)
702 701 if newid != old.node():
703 702 ctx = repo[newid]
704 703 kwt.restrict = True
705 704 kwt.overwrite(ctx, ctx.files(), False, True)
706 705 kwt.restrict = False
707 706 return newid
708 707
709 708
710 709 def kw_copy(orig, ui, repo, pats, opts, rename=False):
711 710 """Wraps cmdutil.copy so that copy/rename destinations do not
712 711 contain expanded keywords.
713 712 Note that the source of a regular file destination may also be a
714 713 symlink:
715 714 hg cp sym x -> x is symlink
716 715 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
717 716 For the latter we have to follow the symlink to find out whether its
718 717 target is configured for expansion and we therefore must unexpand the
719 718 keywords in the destination."""
720 719 kwt = getattr(repo, '_keywordkwt', None)
721 720 if kwt is None:
722 721 return orig(ui, repo, pats, opts, rename)
723 722 with repo.wlock():
724 723 orig(ui, repo, pats, opts, rename)
725 724 if opts.get(b'dry_run'):
726 725 return
727 726 wctx = repo[None]
728 727 cwd = repo.getcwd()
729 728
730 729 def haskwsource(dest):
731 730 """Returns true if dest is a regular file and configured for
732 731 expansion or a symlink which points to a file configured for
733 732 expansion."""
734 733 source = repo.dirstate.copied(dest)
735 734 if b'l' in wctx.flags(source):
736 735 source = pathutil.canonpath(
737 736 repo.root, cwd, os.path.realpath(source)
738 737 )
739 738 return kwt.match(source)
740 739
741 740 candidates = [
742 741 f
743 742 for f in repo.dirstate.copies()
744 743 if b'l' not in wctx.flags(f) and haskwsource(f)
745 744 ]
746 745 kwt.overwrite(wctx, candidates, False, False)
747 746
748 747
749 748 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
750 749 '''Wraps record.dorecord expanding keywords after recording.'''
751 750 kwt = getattr(repo, '_keywordkwt', None)
752 751 if kwt is None:
753 752 return orig(ui, repo, commitfunc, *pats, **opts)
754 753 with repo.wlock():
755 754 # record returns 0 even when nothing has changed
756 755 # therefore compare nodes before and after
757 756 kwt.postcommit = True
758 757 ctx = repo[b'.']
759 758 wstatus = ctx.status()
760 759 ret = orig(ui, repo, commitfunc, *pats, **opts)
761 760 recctx = repo[b'.']
762 761 if ctx != recctx:
763 762 modified, added = _preselect(wstatus, recctx.files())
764 763 kwt.restrict = False
765 764 with repo.dirstate.changing_parents(repo):
766 765 kwt.overwrite(recctx, modified, False, True)
767 766 kwt.overwrite(recctx, added, False, True, True)
768 767 kwt.restrict = True
769 768 return ret
770 769
771 770
772 771 def kwfilectx_cmp(orig, self, fctx):
773 772 if fctx._customcmp:
774 773 return fctx.cmp(self)
775 774 kwt = getattr(self._repo, '_keywordkwt', None)
776 775 if kwt is None:
777 776 return orig(self, fctx)
778 777 # keyword expansion affects data size, so comparing wdir and filelog
779 778 # sizes does not make sense
780 779 if (
781 780 fctx._filenode is None
782 781 and (
783 782 self._repo._encodefilterpats
784 783 or kwt.match(fctx.path())
785 784 and b'l' not in fctx.flags()
786 785 or self.size() - 4 == fctx.size()
787 786 )
788 787 or self.size() == fctx.size()
789 788 ):
790 789 return self._filelog.cmp(self._filenode, fctx.data())
791 790 return True
792 791
793 792
794 793 def uisetup(ui):
795 794 """Monkeypatches dispatch._parse to retrieve user command.
796 795 Overrides file method to return kwfilelog instead of filelog
797 796 if file matches user configuration.
798 797 Wraps commit to overwrite configured files with updated
799 798 keyword substitutions.
800 799 Monkeypatches patch and webcommands."""
801 800
802 801 def kwdispatch_parse(orig, ui, args):
803 802 '''Monkeypatch dispatch._parse to obtain running hg command.'''
804 803 cmd, func, args, options, cmdoptions = orig(ui, args)
805 804 kwtools[b'hgcmd'] = cmd
806 805 return cmd, func, args, options, cmdoptions
807 806
808 807 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
809 808
810 809 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
811 810 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
812 811 extensions.wrapfunction(patch, 'diff', kwdiff)
813 812 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
814 813 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
815 814 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
816 815 for c in nokwwebcommands.split():
817 816 extensions.wrapfunction(webcommands, c, kwweb_skip)
818 817
819 818
820 819 def reposetup(ui, repo):
821 820 '''Sets up repo as kwrepo for keyword substitution.'''
822 821
823 822 try:
824 823 if (
825 824 not repo.local()
826 825 or kwtools[b'hgcmd'] in nokwcommands.split()
827 826 or b'.hg' in util.splitpath(repo.root)
828 827 or repo._url.startswith(b'bundle:')
829 828 ):
830 829 return
831 830 except AttributeError:
832 831 pass
833 832
834 833 inc, exc = [], [b'.hg*']
835 834 for pat, opt in ui.configitems(b'keyword'):
836 835 if opt != b'ignore':
837 836 inc.append(pat)
838 837 else:
839 838 exc.append(pat)
840 839 if not inc:
841 840 return
842 841
843 842 kwt = kwtemplater(ui, repo, inc, exc)
844 843
845 844 class kwrepo(repo.__class__):
846 845 def file(self, f):
847 846 if f.startswith(b'/'):
848 847 f = f[1:]
849 848 return kwfilelog(self.svfs, kwt, f)
850 849
851 850 def wread(self, filename):
852 851 data = super(kwrepo, self).wread(filename)
853 852 return kwt.wread(filename, data)
854 853
855 854 def commit(self, *args, **opts):
856 855 # use custom commitctx for user commands
857 856 # other extensions can still wrap repo.commitctx directly
858 857 self.commitctx = self.kwcommitctx
859 858 try:
860 859 return super(kwrepo, self).commit(*args, **opts)
861 860 finally:
862 861 del self.commitctx
863 862
864 863 def kwcommitctx(self, ctx, error=False, origctx=None):
865 864 n = super(kwrepo, self).commitctx(ctx, error, origctx)
866 865 # no lock needed, only called from repo.commit() which already locks
867 866 if not kwt.postcommit:
868 867 restrict = kwt.restrict
869 868 kwt.restrict = True
870 869 kwt.overwrite(
871 870 self[n], sorted(ctx.added() + ctx.modified()), False, True
872 871 )
873 872 kwt.restrict = restrict
874 873 return n
875 874
876 875 def rollback(self, dryrun=False, force=False):
877 876 with self.wlock():
878 877 origrestrict = kwt.restrict
879 878 try:
880 879 if not dryrun:
881 880 changed = self[b'.'].files()
882 881 ret = super(kwrepo, self).rollback(dryrun, force)
883 882 if not dryrun:
884 883 ctx = self[b'.']
885 884 modified, added = _preselect(ctx.status(), changed)
886 885 kwt.restrict = False
887 886 kwt.overwrite(ctx, modified, True, True)
888 887 kwt.overwrite(ctx, added, True, False)
889 888 return ret
890 889 finally:
891 890 kwt.restrict = origrestrict
892 891
893 892 repo.__class__ = kwrepo
894 893 repo._keywordkwt = kwt
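The reposetup above uses Mercurial's dynamic-subclassing idiom: overrides are attached to this one repository object by swapping its class for an on-the-fly subclass, so localrepo itself is never modified. A minimal sketch of the same pattern (the ui.note call is a hypothetical extension behavior, not from keyword.py):

    def reposetup(ui, repo):
        if not repo.local():
            return

        class myrepo(repo.__class__):
            # keyword.py overrides file(), wread(), commit() and
            # rollback() in exactly this way
            def commit(self, *args, **opts):
                ui.note(b'myext: about to commit\n')
                return super(myrepo, self).commit(*args, **opts)

        repo.__class__ = myrepo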
@@ -1,96 +1,95
1 1 # This software may be used and distributed according to the terms of the
2 2 # GNU General Public License version 2 or any later version.
3 3
4 4
5 5 import re
6 6
7 7 from mercurial.i18n import _
8 from mercurial.pycompat import getattr
9 8 from mercurial import (
10 9 error,
11 10 hg,
12 11 util,
13 12 )
14 13 from mercurial.utils import (
15 14 urlutil,
16 15 )
17 16
18 17 from . import (
19 18 lfutil,
20 19 localstore,
21 20 wirestore,
22 21 )
23 22
24 23
25 24 # During clone this function is passed the src's ui object
26 25 # but it needs the dest's ui object so it can read out of
27 26 # the config file. Use repo.ui instead.
28 27 def openstore(repo=None, remote=None, put=False, ui=None):
29 28 if ui is None:
30 29 ui = repo.ui
31 30
32 31 if not remote:
33 32 lfpullsource = getattr(repo, 'lfpullsource', None)
34 33 if put:
35 34 path = urlutil.get_unique_push_path(
36 35 b'lfpullsource', repo, ui, lfpullsource
37 36 )
38 37 else:
39 38 path = urlutil.get_unique_pull_path_obj(
40 39 b'lfpullsource', ui, lfpullsource
41 40 )
42 41
43 42 # XXX we should not explicitly pass b'default', as this will result in
44 43 # b'default' being returned if no `paths.default` was defined. We
45 44 # should explicitly handle the lack of value instead.
46 45 if repo is None:
47 46 path = urlutil.get_unique_pull_path_obj(
48 47 b'lfs',
49 48 ui,
50 49 b'default',
51 50 )
52 51 remote = hg.peer(repo or ui, {}, path)
53 52 elif path.loc == b'default-push' or path.loc == b'default':
54 53 remote = repo
55 54 else:
56 55 remote = hg.peer(repo or ui, {}, path)
57 56
58 57 # The path could be a scheme so use Mercurial's normal functionality
59 58 # to resolve the scheme to a repository and use its path
60 59 path = remote.url() if hasattr(remote, 'url') else remote.path
61 60
62 61 match = _scheme_re.match(path)
63 62 if not match: # regular filesystem path
64 63 scheme = b'file'
65 64 else:
66 65 scheme = match.group(1)
67 66
68 67 try:
69 68 storeproviders = _storeprovider[scheme]
70 69 except KeyError:
71 70 raise error.Abort(_(b'unsupported URL scheme %r') % scheme)
72 71
73 72 for classobj in storeproviders:
74 73 try:
75 74 return classobj(ui, repo, remote)
76 75 except lfutil.storeprotonotcapable:
77 76 pass
78 77
79 78 raise error.Abort(
80 79 _(b'%s does not appear to be a largefile store')
81 80 % urlutil.hidepassword(path)
82 81 )
83 82
84 83
85 84 _storeprovider = {
86 85 b'file': [localstore.localstore],
87 86 b'http': [wirestore.wirestore],
88 87 b'https': [wirestore.wirestore],
89 88 b'ssh': [wirestore.wirestore],
90 89 }
91 90
92 91 _scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')
93 92
94 93
95 94 def getlfile(ui, hash):
96 95 return util.chunkbuffer(openstore(ui=ui)._get(hash))
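For reference, openstore() above resolves a store in two steps: _scheme_re extracts an optional URL scheme (a path without one is treated as a plain filesystem path), and _storeprovider maps the scheme to candidate store classes, which are tried in order until one accepts the remote. A rough sketch of the scheme step, with hypothetical paths:

    def _scheme_of(path):
        # mirrors the logic in openstore(): no scheme means a local path
        m = _scheme_re.match(path)
        return m.group(1) if m else b'file'

    # _scheme_of(b'https://hg.example.com/repo') -> b'https' -> wirestore
    # _scheme_of(b'/srv/largefiles')             -> b'file'  -> localstore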
@@ -1,789 +1,788
1 1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import contextlib
10 10 import errno
11 11 import hashlib
12 12 import json
13 13 import os
14 14 import re
15 15 import socket
16 16
17 17 from mercurial.i18n import _
18 from mercurial.pycompat import getattr
19 18 from mercurial.node import hex
20 19
21 20 from mercurial import (
22 21 encoding,
23 22 error,
24 23 httpconnection as httpconnectionmod,
25 24 pathutil,
26 25 pycompat,
27 26 url as urlmod,
28 27 util,
29 28 vfs as vfsmod,
30 29 worker,
31 30 )
32 31
33 32 from mercurial.utils import (
34 33 stringutil,
35 34 urlutil,
36 35 )
37 36
38 37 from ..largefiles import lfutil
39 38
40 39 # 64 bytes for SHA256
41 40 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
42 41
43 42
44 43 class lfsvfs(vfsmod.vfs):
45 44 def join(self, path):
46 45 """split the path at first two characters, like: XX/XXXXX..."""
47 46 if not _lfsre.match(path):
48 47 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
49 48 return super(lfsvfs, self).join(path[0:2], path[2:])
50 49
51 50 def walk(self, path=None, onerror=None):
52 51 """Yield (dirpath, [], oids) tuple for blobs under path
53 52
54 53 Oids only exist in the root of this vfs, so dirpath is always ''.
55 54 """
56 55 root = os.path.normpath(self.base)
57 56 # when dirpath == root, dirpath[prefixlen:] becomes empty
58 57 # because len(dirpath) < prefixlen.
59 58 prefixlen = len(pathutil.normasprefix(root))
60 59 oids = []
61 60
62 61 for dirpath, dirs, files in os.walk(
63 62 self.reljoin(self.base, path or b''), onerror=onerror
64 63 ):
65 64 dirpath = dirpath[prefixlen:]
66 65
67 66 # Silently skip unexpected files and directories
68 67 if len(dirpath) == 2:
69 68 oids.extend(
70 69 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
71 70 )
72 71
73 72 yield (b'', [], oids)
74 73
75 74
76 75 class nullvfs(lfsvfs):
77 76 def __init__(self):
78 77 pass
79 78
80 79 def exists(self, oid):
81 80 return False
82 81
83 82 def read(self, oid):
84 83 # store.read() calls into here if the blob doesn't exist in its
85 84 # self.vfs. Raise the same error as a normal vfs when asked to read a
86 85 # file that doesn't exist. The only difference is the full file path
87 86 # isn't available in the error.
88 87 raise IOError(
89 88 errno.ENOENT,
90 89 pycompat.sysstr(b'%s: No such file or directory' % oid),
91 90 )
92 91
93 92 def walk(self, path=None, onerror=None):
94 93 return (b'', [], [])
95 94
96 95 def write(self, oid, data):
97 96 pass
98 97
99 98
100 99 class lfsuploadfile(httpconnectionmod.httpsendfile):
101 100 """a file-like object that supports keepalive."""
102 101
103 102 def __init__(self, ui, filename):
104 103 super(lfsuploadfile, self).__init__(ui, filename, b'rb')
105 104 self.read = self._data.read
106 105
107 106 def _makeprogress(self):
108 107 return None # progress is handled by the worker client
109 108
110 109
111 110 class local:
112 111 """Local blobstore for large file contents.
113 112
114 113 This blobstore is used both as a cache and as a staging area for large blobs
115 114 to be uploaded to the remote blobstore.
116 115 """
117 116
118 117 def __init__(self, repo):
119 118 fullpath = repo.svfs.join(b'lfs/objects')
120 119 self.vfs = lfsvfs(fullpath)
121 120
122 121 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
123 122 self.cachevfs = nullvfs()
124 123 else:
125 124 usercache = lfutil._usercachedir(repo.ui, b'lfs')
126 125 self.cachevfs = lfsvfs(usercache)
127 126 self.ui = repo.ui
128 127
129 128 def open(self, oid):
130 129 """Open a read-only file descriptor to the named blob, in either the
131 130 usercache or the local store."""
132 131 return open(self.path(oid), 'rb')
133 132
134 133 def path(self, oid):
135 134 """Build the path for the given blob ``oid``.
136 135
137 136 If the blob exists locally, the path may point to either the usercache
138 137 or the local store. If it doesn't, it will point to the local store.
139 138 This is meant for situations where existing code that isn't LFS aware
140 139 needs to open a blob. Generally, prefer the ``open`` method on this
141 140 class.
142 141 """
143 142 # The usercache is the most likely place to hold the file. Commit will
144 143 # write to both it and the local store, as will anything that downloads
145 144 # the blobs. However, things like clone without an update won't
146 145 # populate the local store. For an init + push of a local clone,
147 146 # the usercache is the only place it _could_ be. If not present, the
148 147 # missing file msg here will indicate the local repo, not the usercache.
149 148 if self.cachevfs.exists(oid):
150 149 return self.cachevfs.join(oid)
151 150
152 151 return self.vfs.join(oid)
153 152
154 153 def download(self, oid, src, content_length):
155 154 """Read the blob from the remote source in chunks, verify the content,
156 155 and write to this local blobstore."""
157 156 sha256 = hashlib.sha256()
158 157 size = 0
159 158
160 159 with self.vfs(oid, b'wb', atomictemp=True) as fp:
161 160 for chunk in util.filechunkiter(src, size=1048576):
162 161 fp.write(chunk)
163 162 sha256.update(chunk)
164 163 size += len(chunk)
165 164
166 165 # If the server advertised a length longer than what we actually
167 166 # received, then we should expect that the server crashed while
168 167 # producing the response (but the server has no way of telling us
169 168 # that), and we really don't need to try to write the response to
170 169 # the localstore, because it's not going to match the expected content.
171 170 # The server also uses this method to store data uploaded by the
172 171 # client, so if this happens on the server side, it's possible
173 172 # that the client crashed or an antivirus interfered with the
174 173 # upload.
175 174 if content_length is not None and int(content_length) != size:
176 175 msg = (
177 176 b"Response length (%d) does not match Content-Length "
178 177 b"header (%d) for %s"
179 178 )
180 179 raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
181 180
182 181 realoid = hex(sha256.digest())
183 182 if realoid != oid:
184 183 raise LfsCorruptionError(
185 184 _(b'corrupt remote lfs object: %s') % oid
186 185 )
187 186
188 187 self._linktousercache(oid)
189 188
190 189 def write(self, oid, data):
191 190 """Write blob to local blobstore.
192 191
193 192 This should only be called from the filelog during a commit or similar.
194 193 As such, there is no need to verify the data. Imports from a remote
195 194 store must use ``download()`` instead."""
196 195 with self.vfs(oid, b'wb', atomictemp=True) as fp:
197 196 fp.write(data)
198 197
199 198 self._linktousercache(oid)
200 199
201 200 def linkfromusercache(self, oid):
202 201 """Link blobs found in the user cache into this store.
203 202
204 203 The server module needs to do this when it lets the client know not to
205 204 upload the blob, to ensure it is always available in this store.
206 205 Normally this is done implicitly when the client reads or writes the
207 206 blob, but that doesn't happen when the server tells the client that it
208 207 already has the blob.
209 208 """
210 209 if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
211 210 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
212 211 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
213 212
214 213 def _linktousercache(self, oid):
215 214 # XXX: should we verify the content of the cache, and hardlink back to
216 215 # the local store on success, but truncate, write and link on failure?
217 216 if not self.cachevfs.exists(oid) and not isinstance(
218 217 self.cachevfs, nullvfs
219 218 ):
220 219 self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
221 220 lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))
222 221
223 222 def read(self, oid, verify=True):
224 223 """Read blob from local blobstore."""
225 224 if not self.vfs.exists(oid):
226 225 blob = self._read(self.cachevfs, oid, verify)
227 226
228 227 # Even if revlog will verify the content, it needs to be verified
229 228 # now before making the hardlink to avoid propagating corrupt blobs.
230 229 # Don't abort if corruption is detected, because `hg verify` will
231 230 # give more useful info about the corruption- simply don't add the
232 231 # hardlink.
233 232 if verify or hex(hashlib.sha256(blob).digest()) == oid:
234 233 self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
235 234 lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
236 235 else:
237 236 self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
238 237 blob = self._read(self.vfs, oid, verify)
239 238 return blob
240 239
241 240 def _read(self, vfs, oid, verify):
242 241 """Read blob (after verifying) from the given store"""
243 242 blob = vfs.read(oid)
244 243 if verify:
245 244 _verify(oid, blob)
246 245 return blob
247 246
248 247 def verify(self, oid):
249 248 """Indicate whether or not the hash of the underlying file matches its
250 249 name."""
251 250 sha256 = hashlib.sha256()
252 251
253 252 with self.open(oid) as fp:
254 253 for chunk in util.filechunkiter(fp, size=1048576):
255 254 sha256.update(chunk)
256 255
257 256 return oid == hex(sha256.digest())
258 257
259 258 def has(self, oid):
260 259 """Returns True if the local blobstore contains the requested blob,
261 260 False otherwise."""
262 261 return self.cachevfs.exists(oid) or self.vfs.exists(oid)
263 262
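To make the oid handling in the class above concrete: a blob's oid is simply the SHA-256 hex digest of its content, and lfsvfs.join() shards that 64-character name into a two-level directory layout so no single directory grows too large. A standalone sketch (the content is hypothetical):

    import hashlib

    content = b'example blob'
    oid = hashlib.sha256(content).hexdigest()  # 64 hex characters
    shard = '%s/%s' % (oid[0:2], oid[2:])      # 'XX/XXXX...' as in lfsvfs.join()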
264 263
265 264 def _urlerrorreason(urlerror):
266 265 """Create a friendly message for the given URLError to be used in an
267 266 LfsRemoteError message.
268 267 """
269 268 inst = urlerror
270 269
271 270 if isinstance(urlerror.reason, Exception):
272 271 inst = urlerror.reason
273 272
274 273 if hasattr(inst, 'reason'):
275 274 try: # usually it is in the form (errno, strerror)
276 275 reason = inst.reason.args[1]
277 276 except (AttributeError, IndexError):
278 277 # it might be anything, for example a string
279 278 reason = inst.reason
280 279 if isinstance(reason, str):
281 280 # SSLError of Python 2.7.9 contains a unicode
282 281 reason = encoding.unitolocal(reason)
283 282 return reason
284 283 elif getattr(inst, "strerror", None):
285 284 return encoding.strtolocal(inst.strerror)
286 285 else:
287 286 return stringutil.forcebytestr(urlerror)
288 287
289 288
290 289 class lfsauthhandler(util.urlreq.basehandler):
291 290 handler_order = 480 # Before HTTPDigestAuthHandler (== 490)
292 291
293 292 def http_error_401(self, req, fp, code, msg, headers):
294 293 """Enforces that any authentication performed is HTTP Basic
295 294 Authentication. No authentication is also acceptable.
296 295 """
297 296 authreq = headers.get('www-authenticate', None)
298 297 if authreq:
299 298 scheme = authreq.split()[0]
300 299
301 300 if scheme.lower() != 'basic':
302 301 msg = _(b'the server must support Basic Authentication')
303 302 raise util.urlerr.httperror(
304 303 req.get_full_url(),
305 304 code,
306 305 encoding.strfromlocal(msg),
307 306 headers,
308 307 fp,
309 308 )
310 309 return None
311 310
312 311
313 312 class _gitlfsremote:
314 313 def __init__(self, repo, url):
315 314 ui = repo.ui
316 315 self.ui = ui
317 316 baseurl, authinfo = url.authinfo()
318 317 self.baseurl = baseurl.rstrip(b'/')
319 318 useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
320 319 if not useragent:
321 320 useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
322 321 self.urlopener = urlmod.opener(ui, authinfo, useragent)
323 322 self.urlopener.add_handler(lfsauthhandler())
324 323 self.retry = ui.configint(b'lfs', b'retry')
325 324
326 325 def writebatch(self, pointers, fromstore):
327 326 """Batch upload from local to remote blobstore."""
328 327 self._batch(_deduplicate(pointers), fromstore, b'upload')
329 328
330 329 def readbatch(self, pointers, tostore):
331 330 """Batch download from remote to local blostore."""
332 331 self._batch(_deduplicate(pointers), tostore, b'download')
333 332
334 333 def _batchrequest(self, pointers, action):
335 334 """Get metadata about objects pointed by pointers for given action
336 335
337 336 Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
338 337 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
339 338 """
340 339 objects = [
341 340 {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
342 341 for p in pointers
343 342 ]
344 343 requestdata = pycompat.bytesurl(
345 344 json.dumps(
346 345 {
347 346 'objects': objects,
348 347 'operation': pycompat.strurl(action),
349 348 }
350 349 )
351 350 )
352 351 url = b'%s/objects/batch' % self.baseurl
353 352 batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
354 353 batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
355 354 batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
356 355 try:
357 356 with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
358 357 rawjson = rsp.read()
359 358 except util.urlerr.httperror as ex:
360 359 hints = {
361 360 400: _(
362 361 b'check that lfs serving is enabled on %s and "%s" is '
363 362 b'supported'
364 363 )
365 364 % (self.baseurl, action),
366 365 404: _(b'the "lfs.url" config may be used to override %s')
367 366 % self.baseurl,
368 367 }
369 368 hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
370 369 raise LfsRemoteError(
371 370 _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
372 371 hint=hint,
373 372 )
374 373 except util.urlerr.urlerror as ex:
375 374 hint = (
376 375 _(b'the "lfs.url" config may be used to override %s')
377 376 % self.baseurl
378 377 )
379 378 raise LfsRemoteError(
380 379 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
381 380 )
382 381 try:
383 382 response = pycompat.json_loads(rawjson)
384 383 except ValueError:
385 384 raise LfsRemoteError(
386 385 _(b'LFS server returns invalid JSON: %s')
387 386 % rawjson
388 387 )
389 388
390 389 if self.ui.debugflag:
391 390 self.ui.debug(b'Status: %d\n' % rsp.status)
392 391 # lfs-test-server and hg serve return headers in different order
393 392 headers = pycompat.bytestr(rsp.info()).strip()
394 393 self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
395 394
396 395 if 'objects' in response:
397 396 response['objects'] = sorted(
398 397 response['objects'], key=lambda p: p['oid']
399 398 )
400 399 self.ui.debug(
401 400 b'%s\n'
402 401 % pycompat.bytesurl(
403 402 json.dumps(
404 403 response,
405 404 indent=2,
406 405 separators=('', ': '),
407 406 sort_keys=True,
408 407 )
409 408 )
410 409 )
411 410
412 411 def encodestr(x):
413 412 if isinstance(x, str):
414 413 return x.encode('utf-8')
415 414 return x
416 415
417 416 return pycompat.rapply(encodestr, response)
418 417
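The exchange implemented by _batchrequest() follows the git-lfs batch API linked in its docstring. For a download, the request and response bodies look roughly like this (oid, size, URL and header values are hypothetical):

    request = {
        'operation': 'download',
        'objects': [{'oid': 'a3b0c44298fc1c14...', 'size': 1048576}],
    }
    response = {
        'objects': [{
            'oid': 'a3b0c44298fc1c14...',
            'size': 1048576,
            'actions': {
                'download': {
                    'href': 'https://lfs.example.com/objects/a3b0c44298fc1c14...',
                    'header': {'Authorization': 'Basic ...'},
                },
            },
        }],
    }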
419 418 def _checkforservererror(self, pointers, responses, action):
420 419 """Scans errors from objects
421 420
422 421 Raises LfsRemoteError if any objects have an error"""
423 422 for response in responses:
424 423 # The server should return 404 when objects cannot be found. Some
425 424 # server implementations (e.g. lfs-test-server) do not set "error"
426 425 # but just remove "download" from "actions". Treat that case
427 426 # the same as a 404 error.
428 427 if b'error' not in response:
429 428 if action == b'download' and action not in response.get(
430 429 b'actions', []
431 430 ):
432 431 code = 404
433 432 else:
434 433 continue
435 434 else:
436 435 # An error dict without a code doesn't make much sense, so
437 436 # treat as a server error.
438 437 code = response.get(b'error').get(b'code', 500)
439 438
440 439 ptrmap = {p.oid(): p for p in pointers}
441 440 p = ptrmap.get(response[b'oid'], None)
442 441 if p:
443 442 filename = getattr(p, 'filename', b'unknown')
444 443 errors = {
445 444 404: b'The object does not exist',
446 445 410: b'The object was removed by the owner',
447 446 422: b'Validation error',
448 447 500: b'Internal server error',
449 448 }
450 449 msg = errors.get(code, b'status code %d' % code)
451 450 raise LfsRemoteError(
452 451 _(b'LFS server error for "%s": %s') % (filename, msg)
453 452 )
454 453 else:
455 454 raise LfsRemoteError(
456 455 _(b'LFS server error. Unsolicited response for oid %s')
457 456 % response[b'oid']
458 457 )
459 458
460 459 def _extractobjects(self, response, pointers, action):
461 460 """extract objects from response of the batch API
462 461
463 462 response: parsed JSON object returned by batch API
464 463 return response['objects'] filtered by action
465 464 raise if any object has an error
466 465 """
467 466 # Scan errors from objects - fail early
468 467 objects = response.get(b'objects', [])
469 468 self._checkforservererror(pointers, objects, action)
470 469
471 470 # Filter objects with given action. Practically, this skips uploading
472 471 # objects which exist in the server.
473 472 filteredobjects = [
474 473 o for o in objects if action in o.get(b'actions', [])
475 474 ]
476 475
477 476 return filteredobjects
478 477
479 478 def _basictransfer(self, obj, action, localstore):
480 479 """Download or upload a single object using basic transfer protocol
481 480
482 481 obj: dict, an object description returned by batch API
483 482 action: string, one of ['upload', 'download']
484 483 localstore: blobstore.local
485 484
486 485 See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
487 486 basic-transfers.md
488 487 """
489 488 oid = obj[b'oid']
490 489 href = obj[b'actions'][action].get(b'href')
491 490 headers = obj[b'actions'][action].get(b'header', {}).items()
492 491
493 492 request = util.urlreq.request(pycompat.strurl(href))
494 493 if action == b'upload':
495 494 # If uploading blobs, read data from local blobstore.
496 495 if not localstore.verify(oid):
497 496 raise error.Abort(
498 497 _(b'detected corrupt lfs object: %s') % oid,
499 498 hint=_(b'run hg verify'),
500 499 )
501 500
502 501 for k, v in headers:
503 502 request.add_header(pycompat.strurl(k), pycompat.strurl(v))
504 503
505 504 try:
506 505 if action == b'upload':
507 506 request.data = lfsuploadfile(self.ui, localstore.path(oid))
508 507 request.get_method = lambda: 'PUT'
509 508 request.add_header('Content-Type', 'application/octet-stream')
510 509 request.add_header('Content-Length', request.data.length)
511 510
512 511 with contextlib.closing(self.urlopener.open(request)) as res:
513 512 contentlength = res.info().get(b"content-length")
514 513 ui = self.ui # Shorten debug lines
515 514 if self.ui.debugflag:
516 515 ui.debug(b'Status: %d\n' % res.status)
517 516 # lfs-test-server and hg serve return headers in different
518 517 # order
519 518 headers = pycompat.bytestr(res.info()).strip()
520 519 ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))
521 520
522 521 if action == b'download':
523 522 # If downloading blobs, store downloaded data to local
524 523 # blobstore
525 524 localstore.download(oid, res, contentlength)
526 525 else:
527 526 blocks = []
528 527 while True:
529 528 data = res.read(1048576)
530 529 if not data:
531 530 break
532 531 blocks.append(data)
533 532
534 533 response = b"".join(blocks)
535 534 if response:
536 535 ui.debug(b'lfs %s response: %s' % (action, response))
537 536 except util.urlerr.httperror as ex:
538 537 if self.ui.debugflag:
539 538 self.ui.debug(
540 539 b'%s: %s\n' % (oid, ex.read())
541 540 ) # XXX: also bytes?
542 541 raise LfsRemoteError(
543 542 _(b'LFS HTTP error: %s (oid=%s, action=%s)')
544 543 % (stringutil.forcebytestr(ex), oid, action)
545 544 )
546 545 except util.urlerr.urlerror as ex:
547 546 hint = _(b'attempted connection to %s') % pycompat.bytesurl(
548 547 util.urllibcompat.getfullurl(request)
549 548 )
550 549 raise LfsRemoteError(
551 550 _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
552 551 )
553 552 finally:
554 553 if request.data:
555 554 request.data.close()
556 555
557 556 def _batch(self, pointers, localstore, action):
558 557 if action not in [b'upload', b'download']:
559 558 raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)
560 559
561 560 response = self._batchrequest(pointers, action)
562 561 objects = self._extractobjects(response, pointers, action)
563 562 total = sum(x.get(b'size', 0) for x in objects)
564 563 sizes = {}
565 564 for obj in objects:
566 565 sizes[obj.get(b'oid')] = obj.get(b'size', 0)
567 566 topic = {
568 567 b'upload': _(b'lfs uploading'),
569 568 b'download': _(b'lfs downloading'),
570 569 }[action]
571 570 if len(objects) > 1:
572 571 self.ui.note(
573 572 _(b'lfs: need to transfer %d objects (%s)\n')
574 573 % (len(objects), util.bytecount(total))
575 574 )
576 575
577 576 def transfer(chunk):
578 577 for obj in chunk:
579 578 objsize = obj.get(b'size', 0)
580 579 if self.ui.verbose:
581 580 if action == b'download':
582 581 msg = _(b'lfs: downloading %s (%s)\n')
583 582 elif action == b'upload':
584 583 msg = _(b'lfs: uploading %s (%s)\n')
585 584 self.ui.note(
586 585 msg % (obj.get(b'oid'), util.bytecount(objsize))
587 586 )
588 587 retry = self.retry
589 588 while True:
590 589 try:
591 590 self._basictransfer(obj, action, localstore)
592 591 yield 1, obj.get(b'oid')
593 592 break
594 593 except socket.error as ex:
595 594 if retry > 0:
596 595 self.ui.note(
597 596 _(b'lfs: failed: %r (remaining retry %d)\n')
598 597 % (stringutil.forcebytestr(ex), retry)
599 598 )
600 599 retry -= 1
601 600 continue
602 601 raise
603 602
604 603 # Until https multiplexing gets sorted out. It's not clear if
605 604 # ConnectionManager.set_ready() is externally synchronized for thread
606 605 # safety with Windows workers.
607 606 if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
608 607 # The POSIX workers are forks of this process, so before spinning
609 608 # them up, close all pooled connections. Otherwise, there's no way
610 609 # to coordinate between them about who is using what, and the
611 610 # transfers will get corrupted.
612 611 #
613 612 # TODO: add a function to keepalive.ConnectionManager to mark all
614 613 # ready connections as in use, and roll that back after the fork?
615 614 # That would allow the existing pool of connections in this process
616 615 # to be preserved.
617 616 def prefork():
618 617 for h in self.urlopener.handlers:
619 618 getattr(h, "close_all", lambda: None)()
620 619
621 620 oids = worker.worker(
622 621 self.ui,
623 622 0.1,
624 623 transfer,
625 624 (),
626 625 sorted(objects, key=lambda o: o.get(b'oid')),
627 626 prefork=prefork,
628 627 )
629 628 else:
630 629 oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))
631 630
632 631 with self.ui.makeprogress(
633 632 topic, unit=_(b"bytes"), total=total
634 633 ) as progress:
635 634 progress.update(0)
636 635 processed = 0
637 636 blobs = 0
638 637 for _one, oid in oids:
639 638 processed += sizes[oid]
640 639 blobs += 1
641 640 progress.update(processed)
642 641 self.ui.note(_(b'lfs: processed: %s\n') % oid)
643 642
644 643 if blobs > 0:
645 644 if action == b'upload':
646 645 self.ui.status(
647 646 _(b'lfs: uploaded %d files (%s)\n')
648 647 % (blobs, util.bytecount(processed))
649 648 )
650 649 elif action == b'download':
651 650 self.ui.status(
652 651 _(b'lfs: downloaded %d files (%s)\n')
653 652 % (blobs, util.bytecount(processed))
654 653 )
655 654
656 655 def __del__(self):
657 656 # copied from mercurial/httppeer.py
658 657 urlopener = getattr(self, 'urlopener', None)
659 658 if urlopener:
660 659 for h in urlopener.handlers:
661 660 h.close()
662 661 getattr(h, "close_all", lambda: None)()
663 662
664 663
665 664 class _dummyremote:
666 665 """Dummy store storing blobs to temp directory."""
667 666
668 667 def __init__(self, repo, url):
669 668 fullpath = repo.vfs.join(b'lfs', url.path)
670 669 self.vfs = lfsvfs(fullpath)
671 670
672 671 def writebatch(self, pointers, fromstore):
673 672 for p in _deduplicate(pointers):
674 673 content = fromstore.read(p.oid(), verify=True)
675 674 with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
676 675 fp.write(content)
677 676
678 677 def readbatch(self, pointers, tostore):
679 678 for p in _deduplicate(pointers):
680 679 with self.vfs(p.oid(), b'rb') as fp:
681 680 tostore.download(p.oid(), fp, None)
682 681
683 682
684 683 class _nullremote:
685 684 """Null store storing blobs to /dev/null."""
686 685
687 686 def __init__(self, repo, url):
688 687 pass
689 688
690 689 def writebatch(self, pointers, fromstore):
691 690 pass
692 691
693 692 def readbatch(self, pointers, tostore):
694 693 pass
695 694
696 695
697 696 class _promptremote:
698 697 """Prompt user to set lfs.url when accessed."""
699 698
700 699 def __init__(self, repo, url):
701 700 pass
702 701
703 702 def writebatch(self, pointers, fromstore, ui=None):
704 703 self._prompt()
705 704
706 705 def readbatch(self, pointers, tostore, ui=None):
707 706 self._prompt()
708 707
709 708 def _prompt(self):
710 709 raise error.Abort(_(b'lfs.url needs to be configured'))
711 710
712 711
713 712 _storemap = {
714 713 b'https': _gitlfsremote,
715 714 b'http': _gitlfsremote,
716 715 b'file': _dummyremote,
717 716 b'null': _nullremote,
718 717 None: _promptremote,
719 718 }
720 719
721 720
722 721 def _deduplicate(pointers):
723 722 """Remove any duplicate oids that exist in the list"""
724 723 reduced = util.sortdict()
725 724 for p in pointers:
726 725 reduced[p.oid()] = p
727 726 return reduced.values()
728 727
729 728
730 729 def _verify(oid, content):
731 730 realoid = hex(hashlib.sha256(content).digest())
732 731 if realoid != oid:
733 732 raise LfsCorruptionError(
734 733 _(b'detected corrupt lfs object: %s') % oid,
735 734 hint=_(b'run hg verify'),
736 735 )
737 736
738 737
739 738 def remote(repo, remote=None):
740 739 """remotestore factory. return a store in _storemap depending on config
741 740
742 741 If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
743 742 infer the endpoint, based on the remote repository using the same path
744 743 adjustments as git. As an extension, 'http' is supported as well so that
745 744 ``hg serve`` works out of the box.
746 745
747 746 https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
748 747 """
749 748 lfsurl = repo.ui.config(b'lfs', b'url')
750 749 url = urlutil.url(lfsurl or b'')
751 750 if lfsurl is None:
752 751 if remote:
753 752 path = remote
754 753 elif hasattr(repo, '_subtoppath'):
755 754 # The pull command sets this during the optional update phase, which
756 755 # tells exactly where the pull originated, whether 'paths.default'
757 756 # or explicit.
758 757 path = repo._subtoppath
759 758 else:
760 759 # TODO: investigate 'paths.remote:lfsurl' style path customization,
761 760 # and fall back to inferring from 'paths.remote' if unspecified.
762 761 path = repo.ui.config(b'paths', b'default') or b''
763 762
764 763 defaulturl = urlutil.url(path)
765 764
766 765 # TODO: support local paths as well.
767 766 # TODO: consider the ssh -> https transformation that git applies
768 767 if defaulturl.scheme in (b'http', b'https'):
769 768 if defaulturl.path and defaulturl.path[-1:] != b'/':
770 769 defaulturl.path += b'/'
771 770 defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'
772 771
773 772 url = urlutil.url(bytes(defaulturl))
774 773 repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)
775 774
776 775 scheme = url.scheme
777 776 if scheme not in _storemap:
778 777 raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
779 778 return _storemap[scheme](repo, url)
780 779
781 780
782 781 class LfsRemoteError(error.StorageError):
783 782 pass
784 783
785 784
786 785 class LfsCorruptionError(error.Abort):
787 786 """Raised when a corrupt blob is detected, aborting an operation
788 787
789 788 It exists to allow specialized handling on the server side."""
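As a concrete reading of remote() above, assuming a hypothetical configuration with no lfs.url set:

    # [paths]
    # default = https://hg.example.com/repo
    #
    # remote() ensures a trailing slash and appends '.git/info/lfs', so the
    # inferred endpoint is https://hg.example.com/repo/.git/info/lfs, and
    # _storemap selects _gitlfsremote for the 'https' scheme.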
@@ -1,544 +1,540
1 1 # wrapper.py - methods wrapping core mercurial logic
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 import hashlib
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial.node import bin, hex, short
13 from mercurial.pycompat import (
14 getattr,
15 setattr,
16 )
17 13
18 14 from mercurial import (
19 15 bundle2,
20 16 changegroup,
21 17 cmdutil,
22 18 context,
23 19 error,
24 20 exchange,
25 21 exthelper,
26 22 localrepo,
27 23 revlog,
28 24 scmutil,
29 25 vfs as vfsmod,
30 26 wireprotov1server,
31 27 )
32 28
33 29 from mercurial.upgrade_utils import (
34 30 actions as upgrade_actions,
35 31 engine as upgrade_engine,
36 32 )
37 33
38 34 from mercurial.interfaces import repository
39 35
40 36 from mercurial.utils import (
41 37 storageutil,
42 38 stringutil,
43 39 )
44 40
45 41 from ..largefiles import lfutil
46 42
47 43 from . import (
48 44 blobstore,
49 45 pointer,
50 46 )
51 47
52 48 eh = exthelper.exthelper()
53 49
54 50
55 51 @eh.wrapfunction(localrepo, 'makefilestorage')
56 52 def localrepomakefilestorage(orig, requirements, features, **kwargs):
57 53 if b'lfs' in requirements:
58 54 features.add(repository.REPO_FEATURE_LFS)
59 55
60 56 return orig(requirements=requirements, features=features, **kwargs)
61 57
62 58
63 59 @eh.wrapfunction(changegroup, 'allsupportedversions')
64 60 def allsupportedversions(orig, ui):
65 61 versions = orig(ui)
66 62 versions.add(b'03')
67 63 return versions
68 64
69 65
70 66 @eh.wrapfunction(wireprotov1server, '_capabilities')
71 67 def _capabilities(orig, repo, proto):
72 68 '''Wrap server command to announce lfs server capability'''
73 69 caps = orig(repo, proto)
74 70 if hasattr(repo.svfs, 'lfslocalblobstore'):
75 71 # Advertise a slightly different capability when lfs is *required*, so
76 72 # that the client knows it MUST load the extension. If lfs is not
77 73 # required on the server, there's no reason to autoload the extension
78 74 # on the client.
79 75 if b'lfs' in repo.requirements:
80 76 caps.append(b'lfs-serve')
81 77
82 78 caps.append(b'lfs')
83 79 return caps
84 80
85 81
86 82 def bypasscheckhash(self, text):
87 83 return False
88 84
89 85
90 86 def readfromstore(self, text):
91 87 """Read filelog content from local blobstore transform for flagprocessor.
92 88
93 89 Default transform for flagprocessor, returning contents from blobstore.
94 90 Returns a 2-tuple (text, validatehash) where validatehash is True, as the
95 91 contents of the blobstore should be checked using checkhash.
96 92 """
97 93 p = pointer.deserialize(text)
98 94 oid = p.oid()
99 95 store = self.opener.lfslocalblobstore
100 96 if not store.has(oid):
101 97 p.filename = self.filename
102 98 self.opener.lfsremoteblobstore.readbatch([p], store)
103 99
104 100 # The caller will validate the content
105 101 text = store.read(oid, verify=False)
106 102
107 103 # pack hg filelog metadata
108 104 hgmeta = {}
109 105 for k in p.keys():
110 106 if k.startswith(b'x-hg-'):
111 107 name = k[len(b'x-hg-') :]
112 108 hgmeta[name] = p[k]
113 109 if hgmeta or text.startswith(b'\1\n'):
114 110 text = storageutil.packmeta(hgmeta, text)
115 111
116 112 return (text, True)
117 113
118 114
119 115 def writetostore(self, text):
120 116 # hg filelog metadata (includes rename, etc)
121 117 hgmeta, offset = storageutil.parsemeta(text)
122 118 if offset and offset > 0:
123 119 # lfs blob does not contain hg filelog metadata
124 120 text = text[offset:]
125 121
126 122 # git-lfs only supports sha256
127 123 oid = hex(hashlib.sha256(text).digest())
128 124 self.opener.lfslocalblobstore.write(oid, text)
129 125
130 126 # replace contents with metadata
131 127 longoid = b'sha256:%s' % oid
132 128 metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))
133 129
134 130 # by default, we expect the content to be binary. however, LFS could also
135 131 # be used for non-binary content. add a special entry for non-binary data.
136 132 # this will be used by filectx.isbinary().
137 133 if not stringutil.binary(text):
138 134 # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
139 135 metadata[b'x-is-binary'] = b'0'
140 136
141 137 # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
142 138 if hgmeta is not None:
143 139 for k, v in hgmeta.items():
144 140 metadata[b'x-hg-%s' % k] = v
145 141
146 142 rawtext = metadata.serialize()
147 143 return (rawtext, False)
148 144
149 145
150 146 def _islfs(rlog, node=None, rev=None):
151 147 if rev is None:
152 148 if node is None:
153 149 # both None - likely working copy content where node is not ready
154 150 return False
155 151 rev = rlog.rev(node)
156 152 else:
157 153 node = rlog.node(rev)
158 154 if node == rlog.nullid:
159 155 return False
160 156 flags = rlog.flags(rev)
161 157 return bool(flags & revlog.REVIDX_EXTSTORED)
162 158
163 159
164 160 # Wrapping may also be applied by remotefilelog
165 161 def filelogaddrevision(
166 162 orig,
167 163 self,
168 164 text,
169 165 transaction,
170 166 link,
171 167 p1,
172 168 p2,
173 169 cachedelta=None,
174 170 node=None,
175 171 flags=revlog.REVIDX_DEFAULT_FLAGS,
176 172 **kwds
177 173 ):
178 174 # The matcher isn't available if reposetup() wasn't called.
179 175 lfstrack = self._revlog.opener.options.get(b'lfstrack')
180 176
181 177 if lfstrack:
182 178 textlen = len(text)
183 179 # exclude hg rename meta from file size
184 180 meta, offset = storageutil.parsemeta(text)
185 181 if offset:
186 182 textlen -= offset
187 183
188 184 if lfstrack(self._revlog.filename, textlen):
189 185 flags |= revlog.REVIDX_EXTSTORED
190 186
191 187 return orig(
192 188 self,
193 189 text,
194 190 transaction,
195 191 link,
196 192 p1,
197 193 p2,
198 194 cachedelta=cachedelta,
199 195 node=node,
200 196 flags=flags,
201 197 **kwds
202 198 )
203 199
204 200
205 201 # Wrapping may also be applied by remotefilelog
206 202 def filelogrenamed(orig, self, node):
207 203 if _islfs(self._revlog, node):
208 204 rawtext = self._revlog.rawdata(node)
209 205 if not rawtext:
210 206 return False
211 207 metadata = pointer.deserialize(rawtext)
212 208 if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
213 209 return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
214 210 else:
215 211 return False
216 212 return orig(self, node)
217 213
218 214
219 215 # Wrapping may also be applied by remotefilelog
220 216 def filelogsize(orig, self, rev):
221 217 if _islfs(self._revlog, rev=rev):
222 218 # fast path: use lfs metadata to answer size
223 219 rawtext = self._revlog.rawdata(rev)
224 220 metadata = pointer.deserialize(rawtext)
225 221 return int(metadata[b'size'])
226 222 return orig(self, rev)
227 223
228 224
229 225 @eh.wrapfunction(revlog, '_verify_revision')
230 226 def _verify_revision(orig, rl, skipflags, state, node):
231 227 if _islfs(rl, node=node):
232 228 rawtext = rl.rawdata(node)
233 229 metadata = pointer.deserialize(rawtext)
234 230
235 231 # Don't skip blobs that are stored locally, as local verification is
236 232 # relatively cheap and there's no other way to verify the raw data in
237 233 # the revlog.
238 234 if rl.opener.lfslocalblobstore.has(metadata.oid()):
239 235 skipflags &= ~revlog.REVIDX_EXTSTORED
240 236 elif skipflags & revlog.REVIDX_EXTSTORED:
241 237 # The wrapped method will set `skipread`, but there's enough local
242 238 # info to check renames.
243 239 state[b'safe_renamed'].add(node)
244 240
245 241 orig(rl, skipflags, state, node)
246 242
247 243
248 244 @eh.wrapfunction(context.basefilectx, 'cmp')
249 245 def filectxcmp(orig, self, fctx):
250 246 """returns True if text is different than fctx"""
251 247 # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
252 248 if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
253 249 # fast path: check LFS oid
254 250 p1 = pointer.deserialize(self.rawdata())
255 251 p2 = pointer.deserialize(fctx.rawdata())
256 252 return p1.oid() != p2.oid()
257 253 return orig(self, fctx)
258 254
259 255
260 256 @eh.wrapfunction(context.basefilectx, 'isbinary')
261 257 def filectxisbinary(orig, self):
262 258 if self.islfs():
263 259 # fast path: use lfs metadata to answer isbinary
264 260 metadata = pointer.deserialize(self.rawdata())
265 261 # if lfs metadata says nothing, assume it's binary by default
266 262 return bool(int(metadata.get(b'x-is-binary', 1)))
267 263 return orig(self)
268 264
269 265
270 266 def filectxislfs(self):
271 267 return _islfs(self.filelog()._revlog, self.filenode())
272 268
273 269
274 270 @eh.wrapfunction(cmdutil, '_updatecatformatter')
275 271 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
276 272 orig(fm, ctx, matcher, path, decode)
277 273 fm.data(rawdata=ctx[path].rawdata())
278 274
279 275
280 276 @eh.wrapfunction(scmutil, 'wrapconvertsink')
281 277 def convertsink(orig, sink):
282 278 sink = orig(sink)
283 279 if sink.repotype == b'hg':
284 280
285 281 class lfssink(sink.__class__):
286 282 def putcommit(
287 283 self,
288 284 files,
289 285 copies,
290 286 parents,
291 287 commit,
292 288 source,
293 289 revmap,
294 290 full,
295 291 cleanp2,
296 292 ):
297 293 pc = super(lfssink, self).putcommit
298 294 node = pc(
299 295 files,
300 296 copies,
301 297 parents,
302 298 commit,
303 299 source,
304 300 revmap,
305 301 full,
306 302 cleanp2,
307 303 )
308 304
309 305 if b'lfs' not in self.repo.requirements:
310 306 ctx = self.repo[node]
311 307
312 308 # The file list may contain removed files, so check for
313 309 # membership before assuming it is in the context.
314 310 if any(f in ctx and ctx[f].islfs() for f, n in files):
315 311 self.repo.requirements.add(b'lfs')
316 312 scmutil.writereporequirements(self.repo)
317 313
318 314 return node
319 315
320 316 sink.__class__ = lfssink
321 317
322 318 return sink
323 319
324 320
325 321 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
326 322 # options and blob stores are passed from othervfs to the new readonlyvfs.
327 323 @eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
328 324 def vfsinit(orig, self, othervfs):
329 325 orig(self, othervfs)
330 326 # copy lfs related options
331 327 for k, v in othervfs.options.items():
332 328 if k.startswith(b'lfs'):
333 329 self.options[k] = v
334 330 # also copy lfs blobstores. note: this can run before reposetup, so lfs
335 331 # blobstore attributes are not always ready at this time.
336 332 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
337 333 if hasattr(othervfs, name):
338 334 setattr(self, name, getattr(othervfs, name))
339 335
340 336
341 337 def _prefetchfiles(repo, revmatches):
342 338 """Ensure that required LFS blobs are present, fetching them as a group if
343 339 needed."""
344 340 if not hasattr(repo.svfs, 'lfslocalblobstore'):
345 341 return
346 342
347 343 pointers = []
348 344 oids = set()
349 345 localstore = repo.svfs.lfslocalblobstore
350 346
351 347 for rev, match in revmatches:
352 348 ctx = repo[rev]
353 349 for f in ctx.walk(match):
354 350 p = pointerfromctx(ctx, f)
355 351 if p and p.oid() not in oids and not localstore.has(p.oid()):
356 352 p.filename = f
357 353 pointers.append(p)
358 354 oids.add(p.oid())
359 355
360 356 if pointers:
361 357 # Recalculating the repo store here allows 'paths.default' that is set
362 358 # on the repo by a clone command to be used for the update.
363 359 blobstore.remote(repo).readbatch(pointers, localstore)
364 360
365 361
366 362 def _canskipupload(repo):
367 363 # Skip if this hasn't been passed to reposetup()
368 364 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
369 365 return True
370 366
371 367 # if remotestore is a null store, upload is a no-op and can be skipped
372 368 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
373 369
374 370
375 371 def candownload(repo):
376 372 # Skip if this hasn't been passed to reposetup()
377 373 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
378 374 return False
379 375
380 376 # if remotestore is a null store, downloads will lead to nothing
381 377 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
382 378
383 379
384 380 def uploadblobsfromrevs(repo, revs):
385 381 """upload lfs blobs introduced by revs"""
386 382 if _canskipupload(repo):
387 383 return
388 384 pointers = extractpointers(repo, revs)
389 385 uploadblobs(repo, pointers)
390 386
391 387
392 388 def prepush(pushop):
393 389 """Prepush hook.
394 390
395 391 Read through the revisions to push, looking for filelog entries that can be
396 392 deserialized into metadata so that we can block the push on their upload to
397 393 the remote blobstore.
398 394 """
399 395 return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)
400 396
401 397
402 398 @eh.wrapfunction(exchange, 'push')
403 399 def push(orig, repo, remote, *args, **kwargs):
404 400 """bail on push if the extension isn't enabled on remote when needed, and
405 401 update the remote store based on the destination path."""
406 402 if b'lfs' in repo.requirements:
407 403 # If the remote peer is for a local repo, the requirement tests in the
408 404 # base class method enforce lfs support. Otherwise, some revisions in
409 405 # this repo use lfs, and the remote repo needs the extension loaded.
410 406 if not remote.local() and not remote.capable(b'lfs'):
411 407 # This is a copy of the message in exchange.push() when requirements
412 408 # are missing between local repos.
413 409 m = _(b"required features are not supported in the destination: %s")
414 410 raise error.Abort(
415 411 m % b'lfs', hint=_(b'enable the lfs extension on the server')
416 412 )
417 413
418 414 # Repositories where this extension is disabled won't have the field.
419 415 # But if there's a requirement, then the extension must be loaded AND
420 416 # there may be blobs to push.
421 417 remotestore = repo.svfs.lfsremoteblobstore
422 418 try:
423 419 repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
424 420 return orig(repo, remote, *args, **kwargs)
425 421 finally:
426 422 repo.svfs.lfsremoteblobstore = remotestore
427 423 else:
428 424 return orig(repo, remote, *args, **kwargs)
429 425
430 426
431 427 # when writing a bundle via "hg bundle" command, upload related LFS blobs
432 428 @eh.wrapfunction(bundle2, 'writenewbundle')
433 429 def writenewbundle(
434 430 orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
435 431 ):
436 432 """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
437 433 uploadblobsfromrevs(repo, outgoing.missing)
438 434 return orig(
439 435 ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
440 436 )
441 437
442 438
443 439 def extractpointers(repo, revs):
444 440 """return a list of lfs pointers added by given revs"""
445 441 repo.ui.debug(b'lfs: computing set of blobs to upload\n')
446 442 pointers = {}
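# keyed by oid, so a blob referenced from several files is uploaded once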
447 443
448 444 makeprogress = repo.ui.makeprogress
449 445 with makeprogress(
450 446 _(b'lfs search'), _(b'changesets'), len(revs)
451 447 ) as progress:
452 448 for r in revs:
453 449 ctx = repo[r]
454 450 for p in pointersfromctx(ctx).values():
455 451 pointers[p.oid()] = p
456 452 progress.increment()
457 453 return sorted(pointers.values(), key=lambda p: p.oid())
458 454
459 455
460 456 def pointerfromctx(ctx, f, removed=False):
461 457 """return a pointer for the named file from the given changectx, or None if
462 458 the file isn't LFS.
463 459
464 460 Optionally, the pointer for a file deleted from the context can be returned.
465 461 Since no such pointer is actually stored, and to distinguish from a non LFS
466 462 file, this pointer is represented by an empty dict.
467 463 """
468 464 _ctx = ctx
469 465 if f not in ctx:
470 466 if not removed:
471 467 return None
472 468 if f in ctx.p1():
473 469 _ctx = ctx.p1()
474 470 elif f in ctx.p2():
475 471 _ctx = ctx.p2()
476 472 else:
477 473 return None
478 474 fctx = _ctx[f]
479 475 if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
480 476 return None
481 477 try:
482 478 p = pointer.deserialize(fctx.rawdata())
483 479 if ctx == _ctx:
484 480 return p
485 481 return {}
486 482 except pointer.InvalidPointer as ex:
487 483 raise error.Abort(
488 484 _(b'lfs: corrupted pointer (%s@%s): %s\n')
489 485 % (f, short(_ctx.node()), ex)
490 486 )
491 487
492 488
493 489 def pointersfromctx(ctx, removed=False):
494 490 """return a dict {path: pointer} for given single changectx.
495 491
496 492 If ``removed`` == True and the LFS file was removed from ``ctx``, the value
497 493 stored for the path is an empty dict.
498 494 """
499 495 result = {}
500 496 m = ctx.repo().narrowmatch()
501 497
502 498 # TODO: consider manifest.fastread() instead
503 499 for f in ctx.files():
504 500 if not m(f):
505 501 continue
506 502 p = pointerfromctx(ctx, f, removed=removed)
507 503 if p is not None:
508 504 result[f] = p
509 505 return result
510 506
511 507
512 508 def uploadblobs(repo, pointers):
513 509 """upload given pointers from local blobstore"""
514 510 if not pointers:
515 511 return
516 512
517 513 remoteblob = repo.svfs.lfsremoteblobstore
518 514 remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)
519 515
520 516
521 517 @eh.wrapfunction(upgrade_engine, 'finishdatamigration')
522 518 def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
523 519 orig(ui, srcrepo, dstrepo, requirements)
524 520
525 521 # Skip if this hasn't been passed to reposetup()
526 522 if hasattr(srcrepo.svfs, 'lfslocalblobstore') and hasattr(
527 523 dstrepo.svfs, 'lfslocalblobstore'
528 524 ):
529 525 srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
530 526 dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs
531 527
532 528 for dirpath, dirs, files in srclfsvfs.walk():
533 529 for oid in files:
534 530 ui.write(_(b'copying lfs blob %s\n') % oid)
535 531 lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))
536 532
537 533
538 534 @eh.wrapfunction(upgrade_actions, 'preservedrequirements')
539 535 @eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
540 536 def upgraderequirements(orig, repo):
541 537 reqs = orig(repo)
542 538 if b'lfs' in repo.requirements:
543 539 reqs.add(b'lfs')
544 540 return reqs
@@ -1,4303 +1,4301
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help COMMAND` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 files creations or deletions. This behavior can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
40 40
41 41 It may be desirable for mq changesets to be kept in the secret phase (see
42 42 :hg:`help phases`), which can be enabled with the following setting::
43 43
44 44 [mq]
45 45 secret = True
46 46
47 47 You will by default be managing a patch queue named "patches". You can
48 48 create other, independent patch queues with the :hg:`qqueue` command.
49 49
50 50 If the working directory contains uncommitted files, qpush, qpop and
51 51 qgoto abort immediately. If -f/--force is used, the changes are
52 52 discarded. Setting::
53 53
54 54 [mq]
55 55 keepchanges = True
56 56
57 57 makes them behave as if --keep-changes were passed, and non-conflicting
58 58 local changes will be tolerated and preserved. If incompatible options
59 59 such as -f/--force or --exact are passed, this setting is ignored.
60 60
61 61 This extension used to provide a strip command. This command now lives
62 62 in the strip extension.
63 63 '''
64 64
65 65
66 66 import os
67 67 import re
68 68 import shutil
69 69 import sys
70 70 from mercurial.i18n import _
71 71 from mercurial.node import (
72 72 bin,
73 73 hex,
74 74 nullrev,
75 75 short,
76 76 )
77 77 from mercurial.pycompat import (
78 delattr,
79 getattr,
80 78 open,
81 79 )
82 80 from mercurial import (
83 81 cmdutil,
84 82 commands,
85 83 encoding,
86 84 error,
87 85 extensions,
88 86 hg,
89 87 localrepo,
90 88 lock as lockmod,
91 89 logcmdutil,
92 90 patch as patchmod,
93 91 phases,
94 92 pycompat,
95 93 registrar,
96 94 revsetlang,
97 95 scmutil,
98 96 smartset,
99 97 strip,
100 98 subrepoutil,
101 99 util,
102 100 vfs as vfsmod,
103 101 )
104 102 from mercurial.utils import (
105 103 dateutil,
106 104 stringutil,
107 105 urlutil,
108 106 )
109 107
110 108 release = lockmod.release
111 109 seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]
112 110
113 111 cmdtable = {}
114 112 command = registrar.command(cmdtable)
115 113 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
116 114 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
117 115 # be specifying the version(s) of Mercurial they are tested with, or
118 116 # leave the attribute unspecified.
119 117 testedwith = b'ships-with-hg-core'
120 118
121 119 configtable = {}
122 120 configitem = registrar.configitem(configtable)
123 121
124 122 configitem(
125 123 b'mq',
126 124 b'git',
127 125 default=b'auto',
128 126 )
129 127 configitem(
130 128 b'mq',
131 129 b'keepchanges',
132 130 default=False,
133 131 )
134 132 configitem(
135 133 b'mq',
136 134 b'plain',
137 135 default=False,
138 136 )
139 137 configitem(
140 138 b'mq',
141 139 b'secret',
142 140 default=False,
143 141 )
144 142
145 143 # force load strip extension formerly included in mq and import some utility
146 144 try:
147 145 extensions.find(b'strip')
148 146 except KeyError:
149 147 # note: load is lazy so we could avoid the try-except,
150 148 # but I (marmoute) prefer this explicit code.
151 149 class dummyui:
152 150 def debug(self, msg):
153 151 pass
154 152
155 153 def log(self, event, msgfmt, *msgargs, **opts):
156 154 pass
157 155
158 156 extensions.load(dummyui(), b'strip', b'')
159 157
160 158 strip = strip.strip
161 159
162 160
163 161 def checksubstate(repo, baserev=None):
164 162 """return list of subrepos at a different revision than substate.
165 163 Abort if any subrepos have uncommitted changes."""
166 164 inclsubs = []
167 165 wctx = repo[None]
168 166 if baserev:
169 167 bctx = repo[baserev]
170 168 else:
171 169 bctx = wctx.p1()
172 170 for s in sorted(wctx.substate):
173 171 wctx.sub(s).bailifchanged(True)
174 172 if s not in bctx.substate or bctx.sub(s).dirty():
175 173 inclsubs.append(s)
176 174 return inclsubs
177 175
178 176
179 177 # Patch names look like unix file names.
180 178 # They must be joinable with the queue directory and result in the patch path.
181 179 normname = util.normpath
182 180
183 181
184 182 class statusentry:
185 183 def __init__(self, node, name):
186 184 self.node, self.name = node, name
187 185
188 186 def __bytes__(self):
189 187 return hex(self.node) + b':' + self.name
190 188
191 189 __str__ = encoding.strmethod(__bytes__)
192 190 __repr__ = encoding.strmethod(__bytes__)
193 191
194 192
195 193 # The order of the headers in 'hg export' HG patches:
196 194 HGHEADERS = [
197 195 # '# HG changeset patch',
198 196 b'# User ',
199 197 b'# Date ',
200 198 b'# ',
201 199 b'# Branch ',
202 200 b'# Node ID ',
203 201 b'# Parent ', # can occur twice for merges - but that is not relevant for mq
204 202 ]
205 203 # The order of headers in plain 'mail style' patches:
206 204 PLAINHEADERS = {
207 205 b'from': 0,
208 206 b'date': 1,
209 207 b'subject': 2,
210 208 }
211 209
212 210
213 211 def inserthgheader(lines, header, value):
214 212 """Assuming lines contains a HG patch header, add a header line with value.
215 213 >>> try: inserthgheader([], b'# Date ', b'z')
216 214 ... except ValueError as inst: print("oops")
217 215 oops
218 216 >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
219 217 ['# HG changeset patch', '# Date z']
220 218 >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
221 219 ['# HG changeset patch', '# Date z', '']
222 220 >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
223 221 ['# HG changeset patch', '# User y', '# Date z']
224 222 >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
225 223 ... b'# User ', b'z')
226 224 ['# HG changeset patch', '# Date x', '# User z']
227 225 >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
228 226 ['# HG changeset patch', '# Date z']
229 227 >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
230 228 ... b'# Date ', b'z')
231 229 ['# HG changeset patch', '# Date z', '', '# Date y']
232 230 >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
233 231 ... b'# Date ', b'z')
234 232 ['# HG changeset patch', '# Date z', '# Parent y']
235 233 """
236 234 start = lines.index(b'# HG changeset patch') + 1
237 235 newindex = HGHEADERS.index(header)
238 236 bestpos = len(lines)
239 237 for i in range(start, len(lines)):
240 238 line = lines[i]
241 239 if not line.startswith(b'# '):
242 240 bestpos = min(bestpos, i)
243 241 break
244 242 for lineindex, h in enumerate(HGHEADERS):
245 243 if line.startswith(h):
246 244 if lineindex == newindex:
247 245 lines[i] = header + value
248 246 return lines
249 247 if lineindex > newindex:
250 248 bestpos = min(bestpos, i)
251 249 break # next line
252 250 lines.insert(bestpos, header + value)
253 251 return lines
254 252
255 253
256 254 def insertplainheader(lines, header, value):
257 255 """For lines containing a plain patch header, add a header line with value.
258 256 >>> insertplainheader([], b'Date', b'z')
259 257 ['Date: z']
260 258 >>> insertplainheader([b''], b'Date', b'z')
261 259 ['Date: z', '']
262 260 >>> insertplainheader([b'x'], b'Date', b'z')
263 261 ['Date: z', '', 'x']
264 262 >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
265 263 ['From: y', 'Date: z', '', 'x']
266 264 >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
267 265 [' date : x', 'From: z', '']
268 266 >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
269 267 ['Date: z', '', 'Date: y']
270 268 >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
271 269 ['From: y', 'foo: bar', 'DATE: z', '', 'x']
272 270 """
273 271 newprio = PLAINHEADERS[header.lower()]
274 272 bestpos = len(lines)
275 273 for i, line in enumerate(lines):
276 274 if b':' in line:
277 275 lheader = line.split(b':', 1)[0].strip().lower()
278 276 lprio = PLAINHEADERS.get(lheader, newprio + 1)
279 277 if lprio == newprio:
280 278 lines[i] = b'%s: %s' % (header, value)
281 279 return lines
282 280 if lprio > newprio and i < bestpos:
283 281 bestpos = i
284 282 else:
285 283 if line:
286 284 lines.insert(i, b'')
287 285 if i < bestpos:
288 286 bestpos = i
289 287 break
290 288 lines.insert(bestpos, b'%s: %s' % (header, value))
291 289 return lines
292 290
293 291
294 292 class patchheader:
295 293 def __init__(self, pf, plainmode=False):
296 294 def eatdiff(lines):
297 295 while lines:
298 296 l = lines[-1]
299 297 if (
300 298 l.startswith(b"diff -")
301 299 or l.startswith(b"Index:")
302 300 or l.startswith(b"===========")
303 301 ):
304 302 del lines[-1]
305 303 else:
306 304 break
307 305
308 306 def eatempty(lines):
309 307 while lines:
310 308 if not lines[-1].strip():
311 309 del lines[-1]
312 310 else:
313 311 break
314 312
315 313 message = []
316 314 comments = []
317 315 user = None
318 316 date = None
319 317 parent = None
320 318 format = None
321 319 subject = None
322 320 branch = None
323 321 nodeid = None
324 322 diffstart = 0
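# diffstart values: 0 = no diff seen yet, 1 = a "--- " line was just
# seen, 2 = diff confirmed ("diff --git", or "+++ " following "--- ")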
325 323
326 324 for line in open(pf, b'rb'):
327 325 line = line.rstrip()
328 326 if line.startswith(b'diff --git') or (
329 327 diffstart and line.startswith(b'+++ ')
330 328 ):
331 329 diffstart = 2
332 330 break
333 331 diffstart = 0 # reset
334 332 if line.startswith(b"--- "):
335 333 diffstart = 1
336 334 continue
337 335 elif format == b"hgpatch":
338 336 # parse values when importing the result of an hg export
339 337 if line.startswith(b"# User "):
340 338 user = line[7:]
341 339 elif line.startswith(b"# Date "):
342 340 date = line[7:]
343 341 elif line.startswith(b"# Parent "):
344 342 parent = line[9:].lstrip() # handle double trailing space
345 343 elif line.startswith(b"# Branch "):
346 344 branch = line[9:]
347 345 elif line.startswith(b"# Node ID "):
348 346 nodeid = line[10:]
349 347 elif not line.startswith(b"# ") and line:
350 348 message.append(line)
351 349 format = None
352 350 elif line == b'# HG changeset patch':
353 351 message = []
354 352 format = b"hgpatch"
355 353 elif format != b"tagdone" and (
356 354 line.startswith(b"Subject: ") or line.startswith(b"subject: ")
357 355 ):
358 356 subject = line[9:]
359 357 format = b"tag"
360 358 elif format != b"tagdone" and (
361 359 line.startswith(b"From: ") or line.startswith(b"from: ")
362 360 ):
363 361 user = line[6:]
364 362 format = b"tag"
365 363 elif format != b"tagdone" and (
366 364 line.startswith(b"Date: ") or line.startswith(b"date: ")
367 365 ):
368 366 date = line[6:]
369 367 format = b"tag"
370 368 elif format == b"tag" and line == b"":
371 369 # when looking for tags (subject: from: etc) they
372 370 # end once you find a blank line in the source
373 371 format = b"tagdone"
374 372 elif message or line:
375 373 message.append(line)
376 374 comments.append(line)
377 375
378 376 eatdiff(message)
379 377 eatdiff(comments)
380 378 # Remember the exact starting line of the patch diffs before consuming
381 379 # empty lines, for external use by TortoiseHg and others
382 380 self.diffstartline = len(comments)
383 381 eatempty(message)
384 382 eatempty(comments)
385 383
386 384 # make sure message isn't empty
387 385 if format and format.startswith(b"tag") and subject:
388 386 message.insert(0, subject)
389 387
390 388 self.message = message
391 389 self.comments = comments
392 390 self.user = user
393 391 self.date = date
394 392 self.parent = parent
395 393 # nodeid and branch are for external use by TortoiseHg and others
396 394 self.nodeid = nodeid
397 395 self.branch = branch
398 396 self.haspatch = diffstart > 1
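# plain mode applies when forced by the caller, or when the patch has
# mail-style Date:/From: headers but no HG changeset header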
399 397 self.plainmode = (
400 398 plainmode
401 399 or b'# HG changeset patch' not in self.comments
402 400 and any(
403 401 c.startswith(b'Date: ') or c.startswith(b'From: ')
404 402 for c in self.comments
405 403 )
406 404 )
407 405
408 406 def setuser(self, user):
409 407 try:
410 408 inserthgheader(self.comments, b'# User ', user)
411 409 except ValueError:
412 410 if self.plainmode:
413 411 insertplainheader(self.comments, b'From', user)
414 412 else:
415 413 tmp = [b'# HG changeset patch', b'# User ' + user]
416 414 self.comments = tmp + self.comments
417 415 self.user = user
418 416
419 417 def setdate(self, date):
420 418 try:
421 419 inserthgheader(self.comments, b'# Date ', date)
422 420 except ValueError:
423 421 if self.plainmode:
424 422 insertplainheader(self.comments, b'Date', date)
425 423 else:
426 424 tmp = [b'# HG changeset patch', b'# Date ' + date]
427 425 self.comments = tmp + self.comments
428 426 self.date = date
429 427
430 428 def setparent(self, parent):
431 429 try:
432 430 inserthgheader(self.comments, b'# Parent ', parent)
433 431 except ValueError:
434 432 if not self.plainmode:
435 433 tmp = [b'# HG changeset patch', b'# Parent ' + parent]
436 434 self.comments = tmp + self.comments
437 435 self.parent = parent
438 436
439 437 def setmessage(self, message):
440 438 if self.comments:
441 439 self._delmsg()
442 440 self.message = [message]
443 441 if message:
444 442 if self.plainmode and self.comments and self.comments[-1]:
445 443 self.comments.append(b'')
446 444 self.comments.append(message)
447 445
448 446 def __bytes__(self):
449 447 s = b'\n'.join(self.comments).rstrip()
450 448 if not s:
451 449 return b''
452 450 return s + b'\n\n'
453 451
454 452 __str__ = encoding.strmethod(__bytes__)
455 453
456 454 def _delmsg(self):
457 455 """Remove existing message, keeping the rest of the comments fields.
458 456 If comments contains 'subject: ', message will prepend
459 457 the field and a blank line."""
460 458 if self.message:
461 459 subj = b'subject: ' + self.message[0].lower()
462 460 for i in range(len(self.comments)):
463 461 if subj == self.comments[i].lower():
464 462 del self.comments[i]
465 463 self.message = self.message[2:]
466 464 break
467 465 ci = 0
468 466 for mi in self.message:
469 467 while mi != self.comments[ci]:
470 468 ci += 1
471 469 del self.comments[ci]
472 470
473 471
474 472 def newcommit(repo, phase, *args, **kwargs):
475 473 """helper dedicated to ensure a commit respect mq.secret setting
476 474
477 475 It should be used instead of repo.commit inside the mq source for operations
478 476 creating new changesets.
479 477 """
480 478 repo = repo.unfiltered()
481 479 if phase is None:
482 480 if repo.ui.configbool(b'mq', b'secret'):
483 481 phase = phases.secret
484 482 overrides = {(b'ui', b'allowemptycommit'): True}
485 483 if phase is not None:
486 484 overrides[(b'phases', b'new-commit')] = phase
487 485 with repo.ui.configoverride(overrides, b'mq'):
488 486 repo.ui.setconfig(b'ui', b'allowemptycommit', True)
489 487 return repo.commit(*args, **kwargs)
490 488
491 489
492 490 class AbortNoCleanup(error.Abort):
493 491 pass
494 492
495 493
496 494 class queue:
497 495 def __init__(self, ui, baseui, path, patchdir=None):
498 496 self.basepath = path
499 497 try:
500 498 with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
501 499 cur = fh.read().rstrip()
502 500
503 501 if not cur:
504 502 curpath = os.path.join(path, b'patches')
505 503 else:
506 504 curpath = os.path.join(path, b'patches-' + cur)
507 505 except IOError:
508 506 curpath = os.path.join(path, b'patches')
509 507 self.path = patchdir or curpath
510 508 self.opener = vfsmod.vfs(self.path)
511 509 self.ui = ui
512 510 self.baseui = baseui
513 511 self.applieddirty = False
514 512 self.seriesdirty = False
515 513 self.added = []
516 514 self.seriespath = b"series"
517 515 self.statuspath = b"status"
518 516 self.guardspath = b"guards"
519 517 self.activeguards = None
520 518 self.guardsdirty = False
521 519 # Handle mq.git as a bool with extended values
522 520 gitmode = ui.config(b'mq', b'git').lower()
523 521 boolmode = stringutil.parsebool(gitmode)
524 522 if boolmode is not None:
525 523 if boolmode:
526 524 gitmode = b'yes'
527 525 else:
528 526 gitmode = b'no'
529 527 self.gitmode = gitmode
530 528 # deprecated config: mq.plain
531 529 self.plainmode = ui.configbool(b'mq', b'plain')
532 530 self.checkapplied = True
533 531
534 532 @util.propertycache
535 533 def applied(self):
536 534 def parselines(lines):
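# each status line has the form "<hex node>:<patch name>"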
537 535 for l in lines:
538 536 entry = l.split(b':', 1)
539 537 if len(entry) > 1:
540 538 n, name = entry
541 539 yield statusentry(bin(n), name)
542 540 elif l.strip():
543 541 self.ui.warn(
544 542 _(b'malformatted mq status line: %s\n')
545 543 % stringutil.pprint(entry)
546 544 )
547 545 # else we ignore empty lines
548 546
549 547 try:
550 548 lines = self.opener.read(self.statuspath).splitlines()
551 549 return list(parselines(lines))
552 550 except FileNotFoundError:
553 551 return []
554 552
555 553 @util.propertycache
556 554 def fullseries(self):
557 555 try:
558 556 return self.opener.read(self.seriespath).splitlines()
559 557 except FileNotFoundError:
560 558 return []
561 559
562 560 @util.propertycache
563 561 def series(self):
564 562 self.parseseries()
565 563 return self.series
566 564
567 565 @util.propertycache
568 566 def seriesguards(self):
569 567 self.parseseries()
570 568 return self.seriesguards
571 569
572 570 def invalidate(self):
573 571 for a in 'applied fullseries series seriesguards'.split():
574 572 if a in self.__dict__:
575 573 delattr(self, a)
576 574 self.applieddirty = False
577 575 self.seriesdirty = False
578 576 self.guardsdirty = False
579 577 self.activeguards = None
580 578
581 579 def diffopts(self, opts=None, patchfn=None, plain=False):
582 580 """Return diff options tweaked for this mq use, possibly upgrading to
583 581 git format, and possibly plain and without lossy options."""
584 582 diffopts = patchmod.difffeatureopts(
585 583 self.ui,
586 584 opts,
587 585 git=True,
588 586 whitespace=not plain,
589 587 formatchanging=not plain,
590 588 )
591 589 if self.gitmode == b'auto':
592 590 diffopts.upgrade = True
593 591 elif self.gitmode == b'keep':
594 592 pass
595 593 elif self.gitmode in (b'yes', b'no'):
596 594 diffopts.git = self.gitmode == b'yes'
597 595 else:
598 596 raise error.Abort(
599 597 _(b'mq.git option can be auto/keep/yes/no, got %s')
600 598 % self.gitmode
601 599 )
602 600 if patchfn:
603 601 diffopts = self.patchopts(diffopts, patchfn)
604 602 return diffopts
605 603
606 604 def patchopts(self, diffopts, *patches):
607 605 """Return a copy of input diff options with git set to true if
608 606 referenced patch is a git patch and should be preserved as such.
609 607 """
610 608 diffopts = diffopts.copy()
611 609 if not diffopts.git and self.gitmode == b'keep':
612 610 for patchfn in patches:
613 611 patchf = self.opener(patchfn, b'r')
614 612 # if the patch was a git patch, refresh it as a git patch
615 613 diffopts.git = any(
616 614 line.startswith(b'diff --git') for line in patchf
617 615 )
618 616 patchf.close()
619 617 return diffopts
620 618
621 619 def join(self, *p):
622 620 return os.path.join(self.path, *p)
623 621
624 622 def findseries(self, patch):
625 623 def matchpatch(l):
626 624 l = l.split(b'#', 1)[0]
627 625 return l.strip() == patch
628 626
629 627 for index, l in enumerate(self.fullseries):
630 628 if matchpatch(l):
631 629 return index
632 630 return None
633 631
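# matches a guard annotation such as " #+stable" or " #-wip" appended to
# a series line; the captured group is the signed guard name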
634 632 guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
635 633
636 634 def parseseries(self):
637 635 self.series = []
638 636 self.seriesguards = []
639 637 for l in self.fullseries:
640 638 h = l.find(b'#')
641 639 if h == -1:
642 640 patch = l
643 641 comment = b''
644 642 elif h == 0:
645 643 continue
646 644 else:
647 645 patch = l[:h]
648 646 comment = l[h:]
649 647 patch = patch.strip()
650 648 if patch:
651 649 if patch in self.series:
652 650 raise error.Abort(
653 651 _(b'%s appears more than once in %s')
654 652 % (patch, self.join(self.seriespath))
655 653 )
656 654 self.series.append(patch)
657 655 self.seriesguards.append(self.guard_re.findall(comment))
658 656
659 657 def checkguard(self, guard):
660 658 if not guard:
661 659 return _(b'guard cannot be an empty string')
662 660 bad_chars = b'# \t\r\n\f'
663 661 first = guard[0]
664 662 if first in b'-+':
665 663 return _(b'guard %r starts with invalid character: %r') % (
666 664 guard,
667 665 first,
668 666 )
669 667 for c in bad_chars:
670 668 if c in guard:
671 669 return _(b'invalid character in guard %r: %r') % (guard, c)
672 670
673 671 def setactive(self, guards):
674 672 for guard in guards:
675 673 bad = self.checkguard(guard)
676 674 if bad:
677 675 raise error.Abort(bad)
678 676 guards = sorted(set(guards))
679 677 self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
680 678 self.activeguards = guards
681 679 self.guardsdirty = True
682 680
683 681 def active(self):
684 682 if self.activeguards is None:
685 683 self.activeguards = []
686 684 try:
687 685 guards = self.opener.read(self.guardspath).split()
688 686 except FileNotFoundError:
689 687 guards = []
690 688 for i, guard in enumerate(guards):
691 689 bad = self.checkguard(guard)
692 690 if bad:
693 691 self.ui.warn(
694 692 b'%s:%d: %s\n'
695 693 % (self.join(self.guardspath), i + 1, bad)
696 694 )
697 695 else:
698 696 self.activeguards.append(guard)
699 697 return self.activeguards
700 698
701 699 def setguards(self, idx, guards):
702 700 for g in guards:
703 701 if len(g) < 2:
704 702 raise error.Abort(_(b'guard %r too short') % g)
705 703 if g[0] not in b'-+':
706 704 raise error.Abort(_(b'guard %r starts with invalid char') % g)
707 705 bad = self.checkguard(g[1:])
708 706 if bad:
709 707 raise error.Abort(bad)
710 708 drop = self.guard_re.sub(b'', self.fullseries[idx])
711 709 self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
712 710 self.parseseries()
713 711 self.seriesdirty = True
714 712
715 713 def pushable(self, idx):
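# For example, with hypothetical active guards ['a']: a patch guarded
# by '-a' is never pushable, '+a' makes it pushable, and a patch
# guarded only by '+b' is skipped with reason "+b".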
716 714 if isinstance(idx, bytes):
717 715 idx = self.series.index(idx)
718 716 patchguards = self.seriesguards[idx]
719 717 if not patchguards:
720 718 return True, None
721 719 guards = self.active()
722 720 exactneg = [
723 721 g for g in patchguards if g.startswith(b'-') and g[1:] in guards
724 722 ]
725 723 if exactneg:
726 724 return False, stringutil.pprint(exactneg[0])
727 725 pos = [g for g in patchguards if g.startswith(b'+')]
728 726 exactpos = [g for g in pos if g[1:] in guards]
729 727 if pos:
730 728 if exactpos:
731 729 return True, stringutil.pprint(exactpos[0])
732 730 return False, b' '.join([stringutil.pprint(p) for p in pos])
733 731 return True, b''
734 732
735 733 def explainpushable(self, idx, all_patches=False):
736 734 if all_patches:
737 735 write = self.ui.write
738 736 else:
739 737 write = self.ui.warn
740 738
741 739 if all_patches or self.ui.verbose:
742 740 if isinstance(idx, bytes):
743 741 idx = self.series.index(idx)
744 742 pushable, why = self.pushable(idx)
745 743 if all_patches and pushable:
746 744 if why is None:
747 745 write(
748 746 _(b'allowing %s - no guards in effect\n')
749 747 % self.series[idx]
750 748 )
751 749 else:
752 750 if not why:
753 751 write(
754 752 _(b'allowing %s - no matching negative guards\n')
755 753 % self.series[idx]
756 754 )
757 755 else:
758 756 write(
759 757 _(b'allowing %s - guarded by %s\n')
760 758 % (self.series[idx], why)
761 759 )
762 760 if not pushable:
763 761 if why:
764 762 write(
765 763 _(b'skipping %s - guarded by %s\n')
766 764 % (self.series[idx], why)
767 765 )
768 766 else:
769 767 write(
770 768 _(b'skipping %s - no matching guards\n')
771 769 % self.series[idx]
772 770 )
773 771
774 772 def savedirty(self):
775 773 def writelist(items, path):
776 774 fp = self.opener(path, b'wb')
777 775 for i in items:
778 776 fp.write(b"%s\n" % i)
779 777 fp.close()
780 778
781 779 if self.applieddirty:
782 780 writelist(map(bytes, self.applied), self.statuspath)
783 781 self.applieddirty = False
784 782 if self.seriesdirty:
785 783 writelist(self.fullseries, self.seriespath)
786 784 self.seriesdirty = False
787 785 if self.guardsdirty:
788 786 writelist(self.activeguards, self.guardspath)
789 787 self.guardsdirty = False
790 788 if self.added:
791 789 qrepo = self.qrepo()
792 790 if qrepo:
793 791 with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
794 792 qrepo[None].add(
795 793 f for f in self.added if f not in qrepo[None]
796 794 )
797 795 self.added = []
798 796
799 797 def removeundo(self, repo):
800 798 undo = repo.sjoin(b'undo')
801 799 if not os.path.exists(undo):
802 800 return
803 801 try:
804 802 os.unlink(undo)
805 803 except OSError as inst:
806 804 self.ui.warn(
807 805 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
808 806 )
809 807
810 808 def backup(self, repo, files, copy=False):
811 809 # back up local changes in --force case
812 810 for f in sorted(files):
813 811 absf = repo.wjoin(f)
814 812 if os.path.lexists(absf):
815 813 absorig = scmutil.backuppath(self.ui, repo, f)
816 814 self.ui.note(
817 815 _(b'saving current version of %s as %s\n')
818 816 % (f, os.path.relpath(absorig))
819 817 )
820 818
821 819 if copy:
822 820 util.copyfile(absf, absorig)
823 821 else:
824 822 util.rename(absf, absorig)
825 823
826 824 def printdiff(
827 825 self,
828 826 repo,
829 827 diffopts,
830 828 node1,
831 829 node2=None,
832 830 files=None,
833 831 fp=None,
834 832 changes=None,
835 833 opts=None,
836 834 ):
837 835 if opts is None:
838 836 opts = {}
839 837 stat = opts.get(b'stat')
840 838 m = scmutil.match(repo[node1], files, opts)
841 839 logcmdutil.diffordiffstat(
842 840 self.ui,
843 841 repo,
844 842 diffopts,
845 843 repo[node1],
846 844 repo[node2],
847 845 m,
848 846 changes,
849 847 stat,
850 848 fp,
851 849 )
852 850
853 851 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
854 852 # first try just applying the patch
855 853 (err, n) = self.apply(
856 854 repo, [patch], update_status=False, strict=True, merge=rev
857 855 )
858 856
859 857 if err == 0:
860 858 return (err, n)
861 859
862 860 if n is None:
863 861 raise error.Abort(_(b"apply failed for patch %s") % patch)
864 862
865 863 self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)
866 864
867 865 # apply failed, strip away that rev and merge.
868 866 hg.clean(repo, head)
869 867 strip(self.ui, repo, [n], update=False, backup=False)
870 868
871 869 ctx = repo[rev]
872 870 ret = hg.merge(ctx, remind=False)
873 871 if ret:
874 872 raise error.Abort(_(b"update returned %d") % ret)
875 873 n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
876 874 if n is None:
877 875 raise error.Abort(_(b"repo commit failed"))
878 876 try:
879 877 ph = patchheader(mergeq.join(patch), self.plainmode)
880 878 except Exception:
881 879 raise error.Abort(_(b"unable to read %s") % patch)
882 880
883 881 diffopts = self.patchopts(diffopts, patch)
884 882 patchf = self.opener(patch, b"w")
885 883 comments = bytes(ph)
886 884 if comments:
887 885 patchf.write(comments)
888 886 self.printdiff(repo, diffopts, head, n, fp=patchf)
889 887 patchf.close()
890 888 self.removeundo(repo)
891 889 return (0, n)
892 890
893 891 def qparents(self, repo, rev=None):
894 892 """return the mq handled parent or p1
895 893
896 894 In some cases where mq gets itself into being the parent of a merge, the
897 895 appropriate parent may be p2.
898 896 (e.g. an in-progress merge started with mq disabled)
899 897
901 899 If no parents are managed by mq, p1 is returned.
901 899 """
902 900 if rev is None:
903 901 (p1, p2) = repo.dirstate.parents()
904 902 if p2 == repo.nullid:
905 903 return p1
906 904 if not self.applied:
907 905 return None
908 906 return self.applied[-1].node
909 907 p1, p2 = repo.changelog.parents(rev)
910 908 if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
911 909 return p2
912 910 return p1
913 911
914 912 def mergepatch(self, repo, mergeq, series, diffopts):
915 913 if not self.applied:
916 914 # each of the patches merged in will have two parents. This
917 915 # can confuse the qrefresh, qdiff, and strip code because it
918 916 # needs to know which parent is actually in the patch queue.
919 917 # So, we insert a merge marker with only one parent. This way
920 918 # the first patch in the queue is never a merge patch
921 919 #
922 920 pname = b".hg.patches.merge.marker"
923 921 n = newcommit(repo, None, b'[mq]: merge marker', force=True)
924 922 self.removeundo(repo)
925 923 self.applied.append(statusentry(n, pname))
926 924 self.applieddirty = True
927 925
928 926 head = self.qparents(repo)
929 927
930 928 for patch in series:
931 929 patch = mergeq.lookup(patch, strict=True)
932 930 if not patch:
933 931 self.ui.warn(_(b"patch %s does not exist\n") % patch)
934 932 return (1, None)
935 933 pushable, reason = self.pushable(patch)
936 934 if not pushable:
937 935 self.explainpushable(patch, all_patches=True)
938 936 continue
939 937 info = mergeq.isapplied(patch)
940 938 if not info:
941 939 self.ui.warn(_(b"patch %s is not applied\n") % patch)
942 940 return (1, None)
943 941 rev = info[1]
944 942 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
945 943 if head:
946 944 self.applied.append(statusentry(head, patch))
947 945 self.applieddirty = True
948 946 if err:
949 947 return (err, head)
950 948 self.savedirty()
951 949 return (0, head)
952 950
953 951 def patch(self, repo, patchfile):
954 952 """Apply patchfile to the working directory.
955 953 patchfile: name of patch file"""
956 954 files = set()
957 955 try:
958 956 fuzz = patchmod.patch(
959 957 self.ui, repo, patchfile, strip=1, files=files, eolmode=None
960 958 )
961 959 return (True, list(files), fuzz)
962 960 except Exception as inst:
963 961 self.ui.note(stringutil.forcebytestr(inst) + b'\n')
964 962 if not self.ui.verbose:
965 963 self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
966 964 self.ui.traceback()
967 965 return (False, list(files), False)
968 966
969 967 def apply(
970 968 self,
971 969 repo,
972 970 series,
973 971 list=False,
974 972 update_status=True,
975 973 strict=False,
976 974 patchdir=None,
977 975 merge=None,
978 976 all_files=None,
979 977 tobackup=None,
980 978 keepchanges=False,
981 979 ):
982 980 wlock = lock = tr = None
983 981 try:
984 982 wlock = repo.wlock()
985 983 lock = repo.lock()
986 984 tr = repo.transaction(b"qpush")
987 985 try:
988 986 ret = self._apply(
989 987 repo,
990 988 series,
991 989 list,
992 990 update_status,
993 991 strict,
994 992 patchdir,
995 993 merge,
996 994 all_files=all_files,
997 995 tobackup=tobackup,
998 996 keepchanges=keepchanges,
999 997 )
1000 998 tr.close()
1001 999 self.savedirty()
1002 1000 return ret
1003 1001 except AbortNoCleanup:
1004 1002 tr.close()
1005 1003 self.savedirty()
1006 1004 raise
1007 1005 except: # re-raises
1008 1006 try:
1009 1007 tr.abort()
1010 1008 finally:
1011 1009 self.invalidate()
1012 1010 raise
1013 1011 finally:
1014 1012 release(tr, lock, wlock)
1015 1013 self.removeundo(repo)
1016 1014
1017 1015 def _apply(
1018 1016 self,
1019 1017 repo,
1020 1018 series,
1021 1019 list=False,
1022 1020 update_status=True,
1023 1021 strict=False,
1024 1022 patchdir=None,
1025 1023 merge=None,
1026 1024 all_files=None,
1027 1025 tobackup=None,
1028 1026 keepchanges=False,
1029 1027 ):
1030 1028 """returns (error, hash)
1031 1029
1032 1030 error = 1 for unable to read, 2 for patch failed, 3 for patch
1033 1031 fuzz. tobackup is None or a set of files to back up before they
1034 1032 are modified by a patch.
1035 1033 """
1036 1034 # TODO unify with commands.py
1037 1035 if not patchdir:
1038 1036 patchdir = self.path
1039 1037 err = 0
1040 1038 n = None
1041 1039 for patchname in series:
1042 1040 pushable, reason = self.pushable(patchname)
1043 1041 if not pushable:
1044 1042 self.explainpushable(patchname, all_patches=True)
1045 1043 continue
1046 1044 self.ui.status(_(b"applying %s\n") % patchname)
1047 1045 pf = os.path.join(patchdir, patchname)
1048 1046
1049 1047 try:
1050 1048 ph = patchheader(self.join(patchname), self.plainmode)
1051 1049 except IOError:
1052 1050 self.ui.warn(_(b"unable to read %s\n") % patchname)
1053 1051 err = 1
1054 1052 break
1055 1053
1056 1054 message = ph.message
1057 1055 if not message:
1058 1056 # The commit message should not be translated
1059 1057 message = b"imported patch %s\n" % patchname
1060 1058 else:
1061 1059 if list:
1062 1060 # The commit message should not be translated
1063 1061 message.append(b"\nimported patch %s" % patchname)
1064 1062 message = b'\n'.join(message)
1065 1063
1066 1064 if ph.haspatch:
1067 1065 if tobackup:
1068 1066 touched = patchmod.changedfiles(self.ui, repo, pf)
1069 1067 touched = set(touched) & tobackup
1070 1068 if touched and keepchanges:
1071 1069 raise AbortNoCleanup(
1072 1070 _(b"conflicting local changes found"),
1073 1071 hint=_(b"did you forget to qrefresh?"),
1074 1072 )
1075 1073 self.backup(repo, touched, copy=True)
1076 1074 tobackup = tobackup - touched
1077 1075 (patcherr, files, fuzz) = self.patch(repo, pf)
1078 1076 if all_files is not None:
1079 1077 all_files.update(files)
1080 1078 patcherr = not patcherr
1081 1079 else:
1082 1080 self.ui.warn(_(b"patch %s is empty\n") % patchname)
1083 1081 patcherr, files, fuzz = 0, [], 0
1084 1082
1085 1083 if merge and files:
1086 1084 # Mark as removed/merged and update dirstate parent info
1087 1085 with repo.dirstate.changing_parents(repo):
1088 1086 for f in files:
1089 1087 repo.dirstate.update_file_p1(f, p1_tracked=True)
1090 1088 p1 = repo.dirstate.p1()
1091 1089 repo.setparents(p1, merge)
1092 1090
1093 1091 if all_files and b'.hgsubstate' in all_files:
1094 1092 wctx = repo[None]
1095 1093 pctx = repo[b'.']
1096 1094 overwrite = False
1097 1095 mergedsubstate = subrepoutil.submerge(
1098 1096 repo, pctx, wctx, wctx, overwrite
1099 1097 )
1100 1098 files += mergedsubstate.keys()
1101 1099
1102 1100 match = scmutil.matchfiles(repo, files or [])
1103 1101 oldtip = repo.changelog.tip()
1104 1102 n = newcommit(
1105 1103 repo, None, message, ph.user, ph.date, match=match, force=True
1106 1104 )
1107 1105 if repo.changelog.tip() == oldtip:
1108 1106 raise error.Abort(
1109 1107 _(b"qpush exactly duplicates child changeset")
1110 1108 )
1111 1109 if n is None:
1112 1110 raise error.Abort(_(b"repository commit failed"))
1113 1111
1114 1112 if update_status:
1115 1113 self.applied.append(statusentry(n, patchname))
1116 1114
1117 1115 if patcherr:
1118 1116 self.ui.warn(
1119 1117 _(b"patch failed, rejects left in working directory\n")
1120 1118 )
1121 1119 err = 2
1122 1120 break
1123 1121
1124 1122 if fuzz and strict:
1125 1123 self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
1126 1124 err = 3
1127 1125 break
1128 1126 return (err, n)
1129 1127
1130 1128 def _cleanup(self, patches, numrevs, keep=False):
1131 1129 if not keep:
1132 1130 r = self.qrepo()
1133 1131 if r:
1134 1132 with r.wlock(), r.dirstate.changing_files(r):
1135 1133 r[None].forget(patches)
1136 1134 for p in patches:
1137 1135 try:
1138 1136 os.unlink(self.join(p))
1139 1137 except FileNotFoundError:
1140 1138 pass
1141 1139
1142 1140 qfinished = []
1143 1141 if numrevs:
1144 1142 qfinished = self.applied[:numrevs]
1145 1143 del self.applied[:numrevs]
1146 1144 self.applieddirty = True
1147 1145
1148 1146 unknown = []
1149 1147
1150 1148 sortedseries = []
1151 1149 for p in patches:
1152 1150 idx = self.findseries(p)
1153 1151 if idx is None:
1154 1152 sortedseries.append((-1, p))
1155 1153 else:
1156 1154 sortedseries.append((idx, p))
1157 1155
1158 1156 sortedseries.sort(reverse=True)
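# delete from the highest series index down so the remaining
# indexes stay valid while entries are removed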
1159 1157 for i, p in sortedseries:
1160 1158 if i != -1:
1161 1159 del self.fullseries[i]
1162 1160 else:
1163 1161 unknown.append(p)
1164 1162
1165 1163 if unknown:
1166 1164 if numrevs:
1167 1165 rev = {entry.name: entry.node for entry in qfinished}
1168 1166 for p in unknown:
1169 1167 msg = _(b'revision %s refers to unknown patches: %s\n')
1170 1168 self.ui.warn(msg % (short(rev[p]), p))
1171 1169 else:
1172 1170 msg = _(b'unknown patches: %s\n')
1173 1171 raise error.Abort(b''.join(msg % p for p in unknown))
1174 1172
1175 1173 self.parseseries()
1176 1174 self.seriesdirty = True
1177 1175 return [entry.node for entry in qfinished]
1178 1176
1179 1177 def _revpatches(self, repo, revs):
1180 1178 firstrev = repo[self.applied[0].node].rev()
1181 1179 patches = []
1182 1180 for i, rev in enumerate(revs):
1183 1181 if rev < firstrev:
1184 1182 raise error.Abort(_(b'revision %d is not managed') % rev)
1185 1183
1186 1184 ctx = repo[rev]
1187 1185 base = self.applied[i].node
1188 1186 if ctx.node() != base:
1189 1187 msg = _(b'cannot delete revision %d above applied patches')
1190 1188 raise error.Abort(msg % rev)
1191 1189
1192 1190 patch = self.applied[i].name
1193 1191 for fmt in (b'[mq]: %s', b'imported patch %s'):
1194 1192 if ctx.description() == fmt % patch:
1195 1193 msg = _(b'patch %s finalized without changeset message\n')
1196 1194 repo.ui.status(msg % patch)
1197 1195 break
1198 1196
1199 1197 patches.append(patch)
1200 1198 return patches
1201 1199
1202 1200 def finish(self, repo, revs):
1203 1201 # Manually trigger phase computation to ensure phasedefaults is
1204 1202 # executed before we remove the patches.
1205 1203 repo._phasecache
1206 1204 patches = self._revpatches(repo, sorted(revs))
1207 1205 qfinished = self._cleanup(patches, len(patches))
1208 1206 if qfinished and repo.ui.configbool(b'mq', b'secret'):
1209 1207 # only use this logic when the secret option is added
1210 1208 oldqbase = repo[qfinished[0]]
1211 1209 tphase = phases.newcommitphase(repo.ui)
1212 1210 if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
1213 1211 with repo.transaction(b'qfinish') as tr:
1214 1212 phases.advanceboundary(repo, tr, tphase, qfinished)
1215 1213
1216 1214 def delete(self, repo, patches, opts):
1217 1215 if not patches and not opts.get(b'rev'):
1218 1216 raise error.Abort(
1219 1217 _(b'qdelete requires at least one revision or patch name')
1220 1218 )
1221 1219
1222 1220 realpatches = []
1223 1221 for patch in patches:
1224 1222 patch = self.lookup(patch, strict=True)
1225 1223 info = self.isapplied(patch)
1226 1224 if info:
1227 1225 raise error.Abort(_(b"cannot delete applied patch %s") % patch)
1228 1226 if patch not in self.series:
1229 1227 raise error.Abort(_(b"patch %s not in series file") % patch)
1230 1228 if patch not in realpatches:
1231 1229 realpatches.append(patch)
1232 1230
1233 1231 numrevs = 0
1234 1232 if opts.get(b'rev'):
1235 1233 if not self.applied:
1236 1234 raise error.Abort(_(b'no patches applied'))
1237 1235 revs = logcmdutil.revrange(repo, opts.get(b'rev'))
1238 1236 revs.sort()
1239 1237 revpatches = self._revpatches(repo, revs)
1240 1238 realpatches += revpatches
1241 1239 numrevs = len(revpatches)
1242 1240
1243 1241 self._cleanup(realpatches, numrevs, opts.get(b'keep'))
1244 1242
1245 1243 def checktoppatch(self, repo):
1246 1244 '''check that working directory is at qtip'''
1247 1245 if self.applied:
1248 1246 top = self.applied[-1].node
1249 1247 patch = self.applied[-1].name
1250 1248 if repo.dirstate.p1() != top:
1251 1249 raise error.Abort(_(b"working directory revision is not qtip"))
1252 1250 return top, patch
1253 1251 return None, None
1254 1252
1255 1253 def putsubstate2changes(self, substatestate, changes):
1256 1254 if isinstance(changes, list):
1257 1255 mar = changes[:3]
1258 1256 else:
1259 1257 mar = (changes.modified, changes.added, changes.removed)
1260 1258 if any(b'.hgsubstate' in files for files in mar):
1261 1259 return # already listed up
1262 1260 # not yet listed up
1263 1261 if substatestate.added or not substatestate.any_tracked:
1264 1262 mar[1].append(b'.hgsubstate')
1265 1263 elif substatestate.removed:
1266 1264 mar[2].append(b'.hgsubstate')
1267 1265 else: # modified
1268 1266 mar[0].append(b'.hgsubstate')
1269 1267
1270 1268 def checklocalchanges(self, repo, force=False, refresh=True):
1271 1269 excsuffix = b''
1272 1270 if refresh:
1273 1271 excsuffix = b', qrefresh first'
1274 1272 # plain versions for i18n tool to detect them
1275 1273 _(b"local changes found, qrefresh first")
1276 1274 _(b"local changed subrepos found, qrefresh first")
1277 1275
1278 1276 s = repo.status()
1279 1277 if not force:
1280 1278 cmdutil.checkunfinished(repo)
1281 1279 if s.modified or s.added or s.removed or s.deleted:
1282 1280 _(b"local changes found") # i18n tool detection
1283 1281 raise error.Abort(_(b"local changes found" + excsuffix))
1284 1282 if checksubstate(repo):
1285 1283 _(b"local changed subrepos found") # i18n tool detection
1286 1284 raise error.Abort(
1287 1285 _(b"local changed subrepos found" + excsuffix)
1288 1286 )
1289 1287 else:
1290 1288 cmdutil.checkunfinished(repo, skipmerge=True)
1291 1289 return s
1292 1290
1293 1291 _reserved = (b'series', b'status', b'guards', b'.', b'..')
1294 1292
1295 1293 def checkreservedname(self, name):
1296 1294 if name in self._reserved:
1297 1295 raise error.Abort(
1298 1296 _(b'"%s" cannot be used as the name of a patch') % name
1299 1297 )
1300 1298 if name != name.strip():
1301 1299 # whitespace is stripped by parseseries()
1302 1300 raise error.Abort(
1303 1301 _(b'patch name cannot begin or end with whitespace')
1304 1302 )
1305 1303 for prefix in (b'.hg', b'.mq'):
1306 1304 if name.startswith(prefix):
1307 1305 raise error.Abort(
1308 1306 _(b'patch name cannot begin with "%s"') % prefix
1309 1307 )
1310 1308 for c in (b'#', b':', b'\r', b'\n'):
1311 1309 if c in name:
1312 1310 raise error.Abort(
1313 1311 _(b'%r cannot be used in the name of a patch')
1314 1312 % pycompat.bytestr(c)
1315 1313 )
1316 1314
1317 1315 def checkpatchname(self, name, force=False):
1318 1316 self.checkreservedname(name)
1319 1317 if not force and os.path.exists(self.join(name)):
1320 1318 if os.path.isdir(self.join(name)):
1321 1319 raise error.Abort(
1322 1320 _(b'"%s" already exists as a directory') % name
1323 1321 )
1324 1322 else:
1325 1323 raise error.Abort(_(b'patch "%s" already exists') % name)
1326 1324
1327 1325 def makepatchname(self, title, fallbackname):
1328 1326 """Return a suitable filename for title, adding a suffix to make
1329 1327 it unique in the existing list"""
1330 1328 namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
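# e.g. a hypothetical title "Fix: bug #5" yields namebase "fix_bug_5"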
1331 1329 namebase = namebase[:75] # avoid too long name (issue5117)
1332 1330 if namebase:
1333 1331 try:
1334 1332 self.checkreservedname(namebase)
1335 1333 except error.Abort:
1336 1334 namebase = fallbackname
1337 1335 else:
1338 1336 namebase = fallbackname
1339 1337 name = namebase
1340 1338 i = 0
1341 1339 while True:
1342 1340 if name not in self.fullseries:
1343 1341 try:
1344 1342 self.checkpatchname(name)
1345 1343 break
1346 1344 except error.Abort:
1347 1345 pass
1348 1346 i += 1
1349 1347 name = b'%s__%d' % (namebase, i)
1350 1348 return name
1351 1349
1352 1350 def checkkeepchanges(self, keepchanges, force):
1353 1351 if force and keepchanges:
1354 1352 raise error.Abort(_(b'cannot use both --force and --keep-changes'))
1355 1353
1356 1354 def new(self, repo, patchfn, *pats, **opts):
1357 1355 """options:
1358 1356 msg: a string or a no-argument function returning a string
1359 1357 """
1360 1358 opts = pycompat.byteskwargs(opts)
1361 1359 msg = opts.get(b'msg')
1362 1360 edit = opts.get(b'edit')
1363 1361 editform = opts.get(b'editform', b'mq.qnew')
1364 1362 user = opts.get(b'user')
1365 1363 date = opts.get(b'date')
1366 1364 if date:
1367 1365 date = dateutil.parsedate(date)
1368 1366 diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
1369 1367 if opts.get(b'checkname', True):
1370 1368 self.checkpatchname(patchfn)
1371 1369 inclsubs = checksubstate(repo)
1372 1370 if inclsubs:
1373 1371 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1374 1372 if opts.get(b'include') or opts.get(b'exclude') or pats:
1375 1373 # detect missing files in pats
1376 1374 def badfn(f, msg):
1377 1375 if f != b'.hgsubstate': # .hgsubstate is auto-created
1378 1376 raise error.Abort(b'%s: %s' % (f, msg))
1379 1377
1380 1378 match = scmutil.match(repo[None], pats, opts, badfn=badfn)
1381 1379 changes = repo.status(match=match)
1382 1380 else:
1383 1381 changes = self.checklocalchanges(repo, force=True)
1384 1382 commitfiles = list(inclsubs)
1385 1383 commitfiles.extend(changes.modified)
1386 1384 commitfiles.extend(changes.added)
1387 1385 commitfiles.extend(changes.removed)
1388 1386 match = scmutil.matchfiles(repo, commitfiles)
1389 1387 if len(repo[None].parents()) > 1:
1390 1388 raise error.Abort(_(b'cannot manage merge changesets'))
1391 1389 self.checktoppatch(repo)
1392 1390 insert = self.fullseriesend()
1393 1391 with repo.wlock():
1394 1392 try:
1395 1393 # if patch file write fails, abort early
1396 1394 p = self.opener(patchfn, b"w")
1397 1395 except IOError as e:
1398 1396 raise error.Abort(
1399 1397 _(b'cannot write patch "%s": %s')
1400 1398 % (patchfn, encoding.strtolocal(e.strerror))
1401 1399 )
1402 1400 try:
1403 1401 defaultmsg = b"[mq]: %s" % patchfn
1404 1402 editor = cmdutil.getcommiteditor(editform=editform)
1405 1403 if edit:
1406 1404
1407 1405 def finishdesc(desc):
1408 1406 if desc.rstrip():
1409 1407 return desc
1410 1408 else:
1411 1409 return defaultmsg
1412 1410
1413 1411 # i18n: this message is shown in editor with "HG: " prefix
1414 1412 extramsg = _(b'Leave message empty to use default message.')
1415 1413 editor = cmdutil.getcommiteditor(
1416 1414 finishdesc=finishdesc,
1417 1415 extramsg=extramsg,
1418 1416 editform=editform,
1419 1417 )
1420 1418 commitmsg = msg
1421 1419 else:
1422 1420 commitmsg = msg or defaultmsg
1423 1421
1424 1422 n = newcommit(
1425 1423 repo,
1426 1424 None,
1427 1425 commitmsg,
1428 1426 user,
1429 1427 date,
1430 1428 match=match,
1431 1429 force=True,
1432 1430 editor=editor,
1433 1431 )
1434 1432 if n is None:
1435 1433 raise error.Abort(_(b"repo commit failed"))
1436 1434 try:
1437 1435 self.fullseries[insert:insert] = [patchfn]
1438 1436 self.applied.append(statusentry(n, patchfn))
1439 1437 self.parseseries()
1440 1438 self.seriesdirty = True
1441 1439 self.applieddirty = True
1442 1440 nctx = repo[n]
1443 1441 ph = patchheader(self.join(patchfn), self.plainmode)
1444 1442 if user:
1445 1443 ph.setuser(user)
1446 1444 if date:
1447 1445 ph.setdate(b'%d %d' % date)
1448 1446 ph.setparent(hex(nctx.p1().node()))
1449 1447 msg = nctx.description().strip()
1450 1448 if msg == defaultmsg.strip():
1451 1449 msg = b''
1452 1450 ph.setmessage(msg)
1453 1451 p.write(bytes(ph))
1454 1452 if commitfiles:
1455 1453 parent = self.qparents(repo, n)
1456 1454 if inclsubs:
1457 1455 self.putsubstate2changes(substatestate, changes)
1458 1456 chunks = patchmod.diff(
1459 1457 repo,
1460 1458 node1=parent,
1461 1459 node2=n,
1462 1460 changes=changes,
1463 1461 opts=diffopts,
1464 1462 )
1465 1463 for chunk in chunks:
1466 1464 p.write(chunk)
1467 1465 p.close()
1468 1466 r = self.qrepo()
1469 1467 if r:
1470 1468 with r.wlock(), r.dirstate.changing_files(r):
1471 1469 r[None].add([patchfn])
1472 1470 except: # re-raises
1473 1471 repo.rollback()
1474 1472 raise
1475 1473 except Exception:
1476 1474 patchpath = self.join(patchfn)
1477 1475 try:
1478 1476 os.unlink(patchpath)
1479 1477 except OSError:
1480 1478 self.ui.warn(_(b'error unlinking %s\n') % patchpath)
1481 1479 raise
1482 1480 self.removeundo(repo)
1483 1481
1484 1482 def isapplied(self, patch):
1485 1483 """returns (index, rev, patch)"""
1486 1484 for i, a in enumerate(self.applied):
1487 1485 if a.name == patch:
1488 1486 return (i, a.node, a.name)
1489 1487 return None
1490 1488
1491 1489 # if the exact patch name does not exist, we try a few
1492 1490 # variations. If strict is passed, we try only #1
1493 1491 #
1494 1492 # 1) a number (as string) to indicate an offset in the series file
1495 1493 # 2) a unique substring of the patch name was given
1496 1494 # 3) patchname[-+]num to indicate an offset in the series file
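# For example, with a hypothetical series [alpha, beta, gamma] and no
# patch file literally named "1": lookup(b'1') returns beta (series
# index 1), lookup(b'amm') returns gamma (unique substring), and
# lookup(b'beta+1') returns gamma (offset from beta).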
1497 1495 def lookup(self, patch, strict=False):
1498 1496 def partialname(s):
1499 1497 if s in self.series:
1500 1498 return s
1501 1499 matches = [x for x in self.series if s in x]
1502 1500 if len(matches) > 1:
1503 1501 self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
1504 1502 for m in matches:
1505 1503 self.ui.warn(b' %s\n' % m)
1506 1504 return None
1507 1505 if matches:
1508 1506 return matches[0]
1509 1507 if self.series and self.applied:
1510 1508 if s == b'qtip':
1511 1509 return self.series[self.seriesend(True) - 1]
1512 1510 if s == b'qbase':
1513 1511 return self.series[0]
1514 1512 return None
1515 1513
1516 1514 if patch in self.series:
1517 1515 return patch
1518 1516
1519 1517 if not os.path.isfile(self.join(patch)):
1520 1518 try:
1521 1519 sno = int(patch)
1522 1520 except (ValueError, OverflowError):
1523 1521 pass
1524 1522 else:
1525 1523 if -len(self.series) <= sno < len(self.series):
1526 1524 return self.series[sno]
1527 1525
1528 1526 if not strict:
1529 1527 res = partialname(patch)
1530 1528 if res:
1531 1529 return res
1532 1530 minus = patch.rfind(b'-')
1533 1531 if minus >= 0:
1534 1532 res = partialname(patch[:minus])
1535 1533 if res:
1536 1534 i = self.series.index(res)
1537 1535 try:
1538 1536 off = int(patch[minus + 1 :] or 1)
1539 1537 except (ValueError, OverflowError):
1540 1538 pass
1541 1539 else:
1542 1540 if i - off >= 0:
1543 1541 return self.series[i - off]
1544 1542 plus = patch.rfind(b'+')
1545 1543 if plus >= 0:
1546 1544 res = partialname(patch[:plus])
1547 1545 if res:
1548 1546 i = self.series.index(res)
1549 1547 try:
1550 1548 off = int(patch[plus + 1 :] or 1)
1551 1549 except (ValueError, OverflowError):
1552 1550 pass
1553 1551 else:
1554 1552 if i + off < len(self.series):
1555 1553 return self.series[i + off]
1556 1554 raise error.Abort(_(b"patch %s not in series") % patch)
1557 1555
1558 1556 def push(
1559 1557 self,
1560 1558 repo,
1561 1559 patch=None,
1562 1560 force=False,
1563 1561 list=False,
1564 1562 mergeq=None,
1565 1563 all=False,
1566 1564 move=False,
1567 1565 exact=False,
1568 1566 nobackup=False,
1569 1567 keepchanges=False,
1570 1568 ):
1571 1569 self.checkkeepchanges(keepchanges, force)
1572 1570 diffopts = self.diffopts()
1573 1571 with repo.wlock():
1574 1572 heads = []
1575 1573 for hs in repo.branchmap().iterheads():
1576 1574 heads.extend(hs)
1577 1575 if not heads:
1578 1576 heads = [repo.nullid]
1579 1577 if repo.dirstate.p1() not in heads and not exact:
1580 1578 self.ui.status(_(b"(working directory not at a head)\n"))
1581 1579
1582 1580 if not self.series:
1583 1581 self.ui.warn(_(b'no patches in series\n'))
1584 1582 return 0
1585 1583
1586 1584 # Suppose our series file is: A B C and the current 'top'
1587 1585 # patch is B. qpush C should be performed (moving forward)
1588 1586 # qpush B is a NOP (no change) qpush A is an error (can't
1589 1587 # go backwards with qpush)
1590 1588 if patch:
1591 1589 patch = self.lookup(patch)
1592 1590 info = self.isapplied(patch)
1593 1591 if info and info[0] >= len(self.applied) - 1:
1594 1592 self.ui.warn(
1595 1593 _(b'qpush: %s is already at the top\n') % patch
1596 1594 )
1597 1595 return 0
1598 1596
1599 1597 pushable, reason = self.pushable(patch)
1600 1598 if pushable:
1601 1599 if self.series.index(patch) < self.seriesend():
1602 1600 raise error.Abort(
1603 1601 _(b"cannot push to a previous patch: %s") % patch
1604 1602 )
1605 1603 else:
1606 1604 if reason:
1607 1605 reason = _(b'guarded by %s') % reason
1608 1606 else:
1609 1607 reason = _(b'no matching guards')
1610 1608 self.ui.warn(
1611 1609 _(b"cannot push '%s' - %s\n") % (patch, reason)
1612 1610 )
1613 1611 return 1
1614 1612 elif all:
1615 1613 patch = self.series[-1]
1616 1614 if self.isapplied(patch):
1617 1615 self.ui.warn(_(b'all patches are currently applied\n'))
1618 1616 return 0
1619 1617
1620 1618 # Following the above example, starting at 'top' of B:
1621 1619 # qpush should be performed (pushes C), but a subsequent
1622 1620 # qpush without an argument is an error (nothing to
1623 1621 # apply). This allows a loop of "...while hg qpush..." to
1624 1622 # work as it detects an error when done
1625 1623 start = self.seriesend()
1626 1624 if start == len(self.series):
1627 1625 self.ui.warn(_(b'patch series already fully applied\n'))
1628 1626 return 1
1629 1627 if not force and not keepchanges:
1630 1628 self.checklocalchanges(repo, refresh=self.applied)
1631 1629
1632 1630 if exact:
1633 1631 if keepchanges:
1634 1632 raise error.Abort(
1635 1633 _(b"cannot use --exact and --keep-changes together")
1636 1634 )
1637 1635 if move:
1638 1636 raise error.Abort(
1639 1637 _(b'cannot use --exact and --move together')
1640 1638 )
1641 1639 if self.applied:
1642 1640 raise error.Abort(
1643 1641 _(b'cannot push --exact with applied patches')
1644 1642 )
1645 1643 root = self.series[start]
1646 1644 target = patchheader(self.join(root), self.plainmode).parent
1647 1645 if not target:
1648 1646 raise error.Abort(
1649 1647 _(b"%s does not have a parent recorded") % root
1650 1648 )
1651 1649 if repo[target] != repo[b'.']:
1652 1650 hg.update(repo, target)
1653 1651
1654 1652 if move:
1655 1653 if not patch:
1656 1654 raise error.Abort(_(b"please specify the patch to move"))
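# Reorder the series: locate the current push position and the
# patch being moved in fullseries (whose entries may still carry
# guard annotations, hence the guard_re stripping), then reinsert
# the moved patch at the push position.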
1657 1655 for fullstart, rpn in enumerate(self.fullseries):
1658 1656 # strip markers for patch guards
1659 1657 if self.guard_re.split(rpn, 1)[0] == self.series[start]:
1660 1658 break
1661 1659 for i, rpn in enumerate(self.fullseries[fullstart:]):
1662 1660 # strip markers for patch guards
1663 1661 if self.guard_re.split(rpn, 1)[0] == patch:
1664 1662 break
1665 1663 index = fullstart + i
1666 1664 assert index < len(self.fullseries)
1667 1665 fullpatch = self.fullseries[index]
1668 1666 del self.fullseries[index]
1669 1667 self.fullseries.insert(fullstart, fullpatch)
1670 1668 self.parseseries()
1671 1669 self.seriesdirty = True
1672 1670
1673 1671 self.applieddirty = True
1674 1672 if start > 0:
1675 1673 self.checktoppatch(repo)
1676 1674 if not patch:
1677 1675 patch = self.series[start]
1678 1676 end = start + 1
1679 1677 else:
1680 1678 end = self.series.index(patch, start) + 1
1681 1679
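# When forcing (without --no-backup) or keeping changes, remember
# which locally changed files to back up before patches touch them.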
1682 1680 tobackup = set()
1683 1681 if (not nobackup and force) or keepchanges:
1684 1682 status = self.checklocalchanges(repo, force=True)
1685 1683 if keepchanges:
1686 1684 tobackup.update(
1687 1685 status.modified
1688 1686 + status.added
1689 1687 + status.removed
1690 1688 + status.deleted
1691 1689 )
1692 1690 else:
1693 1691 tobackup.update(status.modified + status.added)
1694 1692
1695 1693 s = self.series[start:end]
1696 1694 all_files = set()
1697 1695 try:
1698 1696 if mergeq:
1699 1697 ret = self.mergepatch(repo, mergeq, s, diffopts)
1700 1698 else:
1701 1699 ret = self.apply(
1702 1700 repo,
1703 1701 s,
1704 1702 list,
1705 1703 all_files=all_files,
1706 1704 tobackup=tobackup,
1707 1705 keepchanges=keepchanges,
1708 1706 )
1709 1707 except AbortNoCleanup:
1710 1708 raise
1711 1709 except: # re-raises
1712 1710 self.ui.warn(_(b'cleaning up working directory...\n'))
1713 1711 cmdutil.revert(
1714 1712 self.ui,
1715 1713 repo,
1716 1714 repo[b'.'],
1717 1715 no_backup=True,
1718 1716 )
1719 1717 # only remove unknown files that we know we touched or
1720 1718 # created while patching
1721 1719 for f in all_files:
1722 1720 if f not in repo.dirstate:
1723 1721 repo.wvfs.unlinkpath(f, ignoremissing=True)
1724 1722 self.ui.warn(_(b'done\n'))
1725 1723 raise
1726 1724
1727 1725 if not self.applied:
1728 1726 return ret[0]
1729 1727 top = self.applied[-1].name
1730 1728 if ret[0] and ret[0] > 1:
1731 1729 msg = _(b"errors during apply, please fix and qrefresh %s\n")
1732 1730 self.ui.write(msg % top)
1733 1731 else:
1734 1732 self.ui.write(_(b"now at: %s\n") % top)
1735 1733 return ret[0]
1736 1734
1737 1735 def pop(
1738 1736 self,
1739 1737 repo,
1740 1738 patch=None,
1741 1739 force=False,
1742 1740 update=True,
1743 1741 all=False,
1744 1742 nobackup=False,
1745 1743 keepchanges=False,
1746 1744 ):
1747 1745 self.checkkeepchanges(keepchanges, force)
1748 1746 with repo.wlock():
1749 1747 if patch:
1750 1748 # index, rev, patch
1751 1749 info = self.isapplied(patch)
1752 1750 if not info:
1753 1751 patch = self.lookup(patch)
1754 1752 info = self.isapplied(patch)
1755 1753 if not info:
1756 1754 raise error.Abort(_(b"patch %s is not applied") % patch)
1757 1755
1758 1756 if not self.applied:
1759 1757 # Allow qpop -a to work repeatedly,
1760 1758 # but not qpop without an argument
1761 1759 self.ui.warn(_(b"no patches applied\n"))
1762 1760 return not all
1763 1761
1764 1762 if all:
1765 1763 start = 0
1766 1764 elif patch:
1767 1765 start = info[0] + 1
1768 1766 else:
1769 1767 start = len(self.applied) - 1
1770 1768
1771 1769 if start >= len(self.applied):
1772 1770 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1773 1771 return
1774 1772
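# Even with --no-update, the dirstate must be updated if a popped
# revision is a working directory parent; otherwise only update
# when one of the revisions being popped is checked out.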
1775 1773 if not update:
1776 1774 parents = repo.dirstate.parents()
1777 1775 rr = [x.node for x in self.applied]
1778 1776 for p in parents:
1779 1777 if p in rr:
1780 1778 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1781 1779 update = True
1782 1780 else:
1783 1781 parents = [p.node() for p in repo[None].parents()]
1784 1782 update = any(
1785 1783 entry.node in parents for entry in self.applied[start:]
1786 1784 )
1787 1785
1788 1786 tobackup = set()
1789 1787 if update:
1790 1788 s = self.checklocalchanges(repo, force=force or keepchanges)
1791 1789 if force:
1792 1790 if not nobackup:
1793 1791 tobackup.update(s.modified + s.added)
1794 1792 elif keepchanges:
1795 1793 tobackup.update(
1796 1794 s.modified + s.added + s.removed + s.deleted
1797 1795 )
1798 1796
1799 1797 self.applieddirty = True
1800 1798 end = len(self.applied)
1801 1799 rev = self.applied[start].node
1802 1800
1803 1801 try:
1804 1802 heads = repo.changelog.heads(rev)
1805 1803 except error.LookupError:
1806 1804 node = short(rev)
1807 1805 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1808 1806
1809 1807 if heads != [self.applied[-1].node]:
1810 1808 raise error.Abort(
1811 1809 _(
1812 1810 b"popping would remove a revision not "
1813 1811 b"managed by this patch queue"
1814 1812 )
1815 1813 )
1816 1814 if not repo[self.applied[-1].node].mutable():
1817 1815 raise error.Abort(
1818 1816 _(b"popping would remove a public revision"),
1819 1817 hint=_(b"see 'hg help phases' for details"),
1820 1818 )
1821 1819
1822 1820 # we know there are no local changes, so we can make a simplified
1823 1821 # form of hg.update.
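# That is: revert every file touched by the popped patches to its
# state in the new queue parent (qp), drop files the patches added,
# and finally move the dirstate parent to qp.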
1824 1822 if update:
1825 1823 qp = self.qparents(repo, rev)
1826 1824 ctx = repo[qp]
1827 1825 st = repo.status(qp, b'.')
1828 1826 m, a, r, d = st.modified, st.added, st.removed, st.deleted
1829 1827 if d:
1830 1828 raise error.Abort(_(b"deletions found between repo revs"))
1831 1829
1832 1830 tobackup = set(a + m + r) & tobackup
1833 1831 if keepchanges and tobackup:
1834 1832 raise error.Abort(_(b"local changes found, qrefresh first"))
1835 1833 self.backup(repo, tobackup)
1836 1834 with repo.dirstate.changing_parents(repo):
1837 1835 for f in a:
1838 1836 repo.wvfs.unlinkpath(f, ignoremissing=True)
1839 1837 repo.dirstate.update_file(
1840 1838 f, p1_tracked=False, wc_tracked=False
1841 1839 )
1842 1840 for f in m + r:
1843 1841 fctx = ctx[f]
1844 1842 repo.wwrite(f, fctx.data(), fctx.flags())
1845 1843 repo.dirstate.update_file(
1846 1844 f, p1_tracked=True, wc_tracked=True
1847 1845 )
1848 1846 repo.setparents(qp, repo.nullid)
1849 1847 for patch in reversed(self.applied[start:end]):
1850 1848 self.ui.status(_(b"popping %s\n") % patch.name)
1851 1849 del self.applied[start:end]
1852 1850 strip(self.ui, repo, [rev], update=False, backup=False)
1853 1851 for s, state in repo[b'.'].substate.items():
1854 1852 repo[b'.'].sub(s).get(state)
1855 1853 if self.applied:
1856 1854 self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
1857 1855 else:
1858 1856 self.ui.write(_(b"patch queue now empty\n"))
1859 1857
1860 1858 def diff(self, repo, pats, opts):
1861 1859 top, patch = self.checktoppatch(repo)
1862 1860 if not top:
1863 1861 self.ui.write(_(b"no patches applied\n"))
1864 1862 return
1865 1863 qp = self.qparents(repo, top)
1866 1864 if opts.get(b'reverse'):
1867 1865 node1, node2 = None, qp
1868 1866 else:
1869 1867 node1, node2 = qp, None
1870 1868 diffopts = self.diffopts(opts, patch)
1871 1869 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1872 1870
1873 1871 def refresh(self, repo, pats=None, **opts):
1874 1872 opts = pycompat.byteskwargs(opts)
1875 1873 if not self.applied:
1876 1874 self.ui.write(_(b"no patches applied\n"))
1877 1875 return 1
1878 1876 msg = opts.get(b'msg', b'').rstrip()
1879 1877 edit = opts.get(b'edit')
1880 1878 editform = opts.get(b'editform', b'mq.qrefresh')
1881 1879 newuser = opts.get(b'user')
1882 1880 newdate = opts.get(b'date')
1883 1881 if newdate:
1884 1882 newdate = b'%d %d' % dateutil.parsedate(newdate)
1885 1883 wlock = repo.wlock()
1886 1884
1887 1885 try:
1888 1886 self.checktoppatch(repo)
1889 1887 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1890 1888 if repo.changelog.heads(top) != [top]:
1891 1889 raise error.Abort(
1892 1890 _(b"cannot qrefresh a revision with children")
1893 1891 )
1894 1892 if not repo[top].mutable():
1895 1893 raise error.Abort(
1896 1894 _(b"cannot qrefresh public revision"),
1897 1895 hint=_(b"see 'hg help phases' for details"),
1898 1896 )
1899 1897
1900 1898 cparents = repo.changelog.parents(top)
1901 1899 patchparent = self.qparents(repo, top)
1902 1900
1903 1901 inclsubs = checksubstate(repo, patchparent)
1904 1902 if inclsubs:
1905 1903 substatestate = repo.dirstate.get_entry(b'.hgsubstate')
1906 1904
1907 1905 ph = patchheader(self.join(patchfn), self.plainmode)
1908 1906 diffopts = self.diffopts(
1909 1907 {b'git': opts.get(b'git')}, patchfn, plain=True
1910 1908 )
1911 1909 if newuser:
1912 1910 ph.setuser(newuser)
1913 1911 if newdate:
1914 1912 ph.setdate(newdate)
1915 1913 ph.setparent(hex(patchparent))
1916 1914
1917 1915 # only commit new patch when write is complete
1918 1916 patchf = self.opener(patchfn, b'w', atomictemp=True)
1919 1917
1920 1918 # update the dirstate in place, strip off the qtip commit
1921 1919 # and then commit.
1922 1920 #
1923 1921 # this should really read:
1924 1922 # st = repo.status(top, patchparent)
1925 1923 # but we do it backwards to take advantage of manifest/changelog
1926 1924 # caching against the next repo.status call
1927 1925 st = repo.status(patchparent, top)
1928 1926 mm, aa, dd = st.modified, st.added, st.removed
1929 1927 ctx = repo[top]
1930 1928 aaa = aa[:]
1931 1929 match1 = scmutil.match(repo[None], pats, opts)
1932 1930 # in short mode, we only diff the files already included in
1933 1931 # the patch, plus any specified files
1934 1932 if opts.get(b'short'):
1935 1933 # if amending a patch, we start with existing
1936 1934 # files plus specified files - unfiltered
1937 1935 match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
1938 1936 # filter with include/exclude options
1939 1937 match1 = scmutil.match(repo[None], opts=opts)
1940 1938 else:
1941 1939 match = scmutil.matchall(repo)
1942 1940 stb = repo.status(match=match)
1943 1941 m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
1944 1942 mm = set(mm)
1945 1943 aa = set(aa)
1946 1944 dd = set(dd)
1947 1945
1948 1946 # we might end up with files that were added between
1949 1947 # qtip and the dirstate parent, but then changed in the
1950 1948 # local dirstate. in this case, we want them to only
1951 1949 # show up in the added section
1952 1950 for x in m:
1953 1951 if x not in aa:
1954 1952 mm.add(x)
1955 1953 # we might end up with files added by the local dirstate that
1956 1954 # were deleted by the patch. In this case, they should only
1957 1955 # show up in the changed section.
1958 1956 for x in a:
1959 1957 if x in dd:
1960 1958 dd.remove(x)
1961 1959 mm.add(x)
1962 1960 else:
1963 1961 aa.add(x)
1964 1962 # make sure any files deleted in the local dirstate
1965 1963 # are not in the add or change column of the patch
1966 1964 forget = []
1967 1965 for x in d + r:
1968 1966 if x in aa:
1969 1967 aa.remove(x)
1970 1968 forget.append(x)
1971 1969 continue
1972 1970 else:
1973 1971 mm.discard(x)
1974 1972 dd.add(x)
1975 1973
1976 1974 m = list(mm)
1977 1975 r = list(dd)
1978 1976 a = list(aa)
1979 1977
1980 1978 # create 'match' that includes the files to be recommitted.
1981 1979 # apply match1 via repo.status to ensure correct case handling.
1982 1980 st = repo.status(patchparent, match=match1)
1983 1981 cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
1984 1982 allmatches = set(cm + ca + cr + cd)
1985 1983 refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]
1986 1984
1987 1985 files = set(inclsubs)
1988 1986 for x in refreshchanges:
1989 1987 files.update(x)
1990 1988 match = scmutil.matchfiles(repo, files)
1991 1989
1992 1990 bmlist = repo[top].bookmarks()
1993 1991
1994 1992 with repo.dirstate.changing_parents(repo):
1995 1993 if diffopts.git or diffopts.upgrade:
1996 1994 copies = {}
1997 1995 for dst in a:
1998 1996 src = repo.dirstate.copied(dst)
1999 1997 # during qfold, the source file for copies may
2000 1998 # be removed. Treat this as a simple add.
2001 1999 if src is not None and src in repo.dirstate:
2002 2000 copies.setdefault(src, []).append(dst)
2003 2001 repo.dirstate.update_file(
2004 2002 dst, p1_tracked=False, wc_tracked=True
2005 2003 )
2006 2004 # remember the copies between patchparent and qtip
2007 2005 for dst in aaa:
2008 2006 src = ctx[dst].copysource()
2009 2007 if src:
2010 2008 copies.setdefault(src, []).extend(
2011 2009 copies.get(dst, [])
2012 2010 )
2013 2011 if dst in a:
2014 2012 copies[src].append(dst)
2015 2013 # we can't copy a file created by the patch itself
2016 2014 if dst in copies:
2017 2015 del copies[dst]
2018 2016 for src, dsts in copies.items():
2019 2017 for dst in dsts:
2020 2018 repo.dirstate.copy(src, dst)
2021 2019 else:
2022 2020 for dst in a:
2023 2021 repo.dirstate.update_file(
2024 2022 dst, p1_tracked=False, wc_tracked=True
2025 2023 )
2026 2024 # Drop useless copy information
2027 2025 for f in list(repo.dirstate.copies()):
2028 2026 repo.dirstate.copy(None, f)
2029 2027 for f in r:
2030 2028 repo.dirstate.update_file_p1(f, p1_tracked=True)
2031 2029 # if the patch excludes a modified file, mark that
2032 2030 # file with mtime=0 so status can see it.
2033 2031 mm = []
2034 2032 for i in range(len(m) - 1, -1, -1):
2035 2033 if not match1(m[i]):
2036 2034 mm.append(m[i])
2037 2035 del m[i]
2038 2036 for f in m:
2039 2037 repo.dirstate.update_file_p1(f, p1_tracked=True)
2040 2038 for f in mm:
2041 2039 repo.dirstate.update_file_p1(f, p1_tracked=True)
2042 2040 for f in forget:
2043 2041 repo.dirstate.update_file_p1(f, p1_tracked=False)
2044 2042
2045 2043 user = ph.user or ctx.user()
2046 2044
2047 2045 oldphase = repo[top].phase()
2048 2046
2049 2047 # assumes strip can roll itself back if interrupted
2050 2048 repo.setparents(*cparents)
2051 2049 repo.dirstate.write(repo.currenttransaction())
2052 2050 self.applied.pop()
2053 2051 self.applieddirty = True
2054 2052 strip(self.ui, repo, [top], update=False, backup=False)
2055 2053
2056 2054 try:
2057 2055 # might be nice to attempt to roll back strip after this
2058 2056
2059 2057 defaultmsg = b"[mq]: %s" % patchfn
2060 2058 editor = cmdutil.getcommiteditor(editform=editform)
2061 2059 if edit:
2062 2060
2063 2061 def finishdesc(desc):
2064 2062 if desc.rstrip():
2065 2063 ph.setmessage(desc)
2066 2064 return desc
2067 2065 return defaultmsg
2068 2066
2069 2067 # i18n: this message is shown in editor with "HG: " prefix
2070 2068 extramsg = _(b'Leave message empty to use default message.')
2071 2069 editor = cmdutil.getcommiteditor(
2072 2070 finishdesc=finishdesc,
2073 2071 extramsg=extramsg,
2074 2072 editform=editform,
2075 2073 )
2076 2074 message = msg or b"\n".join(ph.message)
2077 2075 elif not msg:
2078 2076 if not ph.message:
2079 2077 message = defaultmsg
2080 2078 else:
2081 2079 message = b"\n".join(ph.message)
2082 2080 else:
2083 2081 message = msg
2084 2082 ph.setmessage(msg)
2085 2083
2086 2084 # Ensure we create a new changeset in the same phase as
2087 2085 # the old one.
2088 2086 lock = tr = None
2089 2087 try:
2090 2088 lock = repo.lock()
2091 2089 tr = repo.transaction(b'mq')
2092 2090 n = newcommit(
2093 2091 repo,
2094 2092 oldphase,
2095 2093 message,
2096 2094 user,
2097 2095 ph.date,
2098 2096 match=match,
2099 2097 force=True,
2100 2098 editor=editor,
2101 2099 )
2102 2100 # only write patch after a successful commit
2103 2101 c = [list(x) for x in refreshchanges]
2104 2102 if inclsubs:
2105 2103 self.putsubstate2changes(substatestate, c)
2106 2104 chunks = patchmod.diff(
2107 2105 repo, patchparent, changes=c, opts=diffopts
2108 2106 )
2109 2107 comments = bytes(ph)
2110 2108 if comments:
2111 2109 patchf.write(comments)
2112 2110 for chunk in chunks:
2113 2111 patchf.write(chunk)
2114 2112 patchf.close()
2115 2113
2116 2114 marks = repo._bookmarks
2117 2115 marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
2118 2116 tr.close()
2119 2117
2120 2118 self.applied.append(statusentry(n, patchfn))
2121 2119 finally:
2122 2120 lockmod.release(tr, lock)
2123 2121 except: # re-raises
2124 2122 with repo.dirstate.changing_parents(repo):
2125 2123 ctx = repo[cparents[0]]
2126 2124 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
2127 2125 self.savedirty()
2128 2126 self.ui.warn(
2129 2127 _(
2130 2128 b'qrefresh interrupted while patch was popped! '
2131 2129 b'(revert --all, qpush to recover)\n'
2132 2130 )
2133 2131 )
2134 2132 raise
2135 2133 finally:
2136 2134 wlock.release()
2137 2135 self.removeundo(repo)
2138 2136
2139 2137 def init(self, repo, create=False):
2140 2138 if not create and os.path.isdir(self.path):
2141 2139 raise error.Abort(_(b"patch queue directory already exists"))
2142 2140 try:
2143 2141 os.mkdir(self.path)
2144 2142 except FileExistsError:
2145 2143 if not create:
2146 2144 raise
2147 2145 if create:
2148 2146 return self.qrepo(create=True)
2149 2147
2150 2148 def unapplied(self, repo, patch=None):
2151 2149 if patch and patch not in self.series:
2152 2150 raise error.Abort(_(b"patch %s is not in series file") % patch)
2153 2151 if not patch:
2154 2152 start = self.seriesend()
2155 2153 else:
2156 2154 start = self.series.index(patch) + 1
2157 2155 unapplied = []
2158 2156 for i in range(start, len(self.series)):
2159 2157 pushable, reason = self.pushable(i)
2160 2158 if pushable:
2161 2159 unapplied.append((i, self.series[i]))
2162 2160 self.explainpushable(i)
2163 2161 return unapplied
2164 2162
2165 2163 def qseries(
2166 2164 self,
2167 2165 repo,
2168 2166 missing=None,
2169 2167 start=0,
2170 2168 length=None,
2171 2169 status=None,
2172 2170 summary=False,
2173 2171 ):
2174 2172 def displayname(pfx, patchname, state):
2175 2173 if pfx:
2176 2174 self.ui.write(pfx)
2177 2175 if summary:
2178 2176 ph = patchheader(self.join(patchname), self.plainmode)
2179 2177 if ph.message:
2180 2178 msg = ph.message[0]
2181 2179 else:
2182 2180 msg = b''
2183 2181
2184 2182 if self.ui.formatted():
2185 2183 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
2186 2184 if width > 0:
2187 2185 msg = stringutil.ellipsis(msg, width)
2188 2186 else:
2189 2187 msg = b''
2190 2188 self.ui.write(patchname, label=b'qseries.' + state)
2191 2189 self.ui.write(b': ')
2192 2190 self.ui.write(msg, label=b'qseries.message.' + state)
2193 2191 else:
2194 2192 self.ui.write(patchname, label=b'qseries.' + state)
2195 2193 self.ui.write(b'\n')
2196 2194
2197 2195 applied = {p.name for p in self.applied}
2198 2196 if length is None:
2199 2197 length = len(self.series) - start
2200 2198 if not missing:
2201 2199 if self.ui.verbose:
2202 2200 idxwidth = len(b"%d" % (start + length - 1))
2203 2201 for i in range(start, start + length):
2204 2202 patch = self.series[i]
2205 2203 if patch in applied:
2206 2204 char, state = b'A', b'applied'
2207 2205 elif self.pushable(i)[0]:
2208 2206 char, state = b'U', b'unapplied'
2209 2207 else:
2210 2208 char, state = b'G', b'guarded'
2211 2209 pfx = b''
2212 2210 if self.ui.verbose:
2213 2211 pfx = b'%*d %s ' % (idxwidth, i, char)
2214 2212 elif status and status != char:
2215 2213 continue
2216 2214 displayname(pfx, patch, state)
2217 2215 else:
2218 2216 msng_list = []
2219 2217 for root, dirs, files in os.walk(self.path):
2220 2218 d = root[len(self.path) + 1 :]
2221 2219 for f in files:
2222 2220 fl = os.path.join(d, f)
2223 2221 if (
2224 2222 fl not in self.series
2225 2223 and fl
2226 2224 not in (
2227 2225 self.statuspath,
2228 2226 self.seriespath,
2229 2227 self.guardspath,
2230 2228 )
2231 2229 and not fl.startswith(b'.')
2232 2230 ):
2233 2231 msng_list.append(fl)
2234 2232 for x in sorted(msng_list):
2235 2233 pfx = b'D ' if self.ui.verbose else b''
2236 2234 displayname(pfx, x, b'missing')
2237 2235
2238 2236 def issaveline(self, l):
2239 2237 return l.name == b'.hg.patches.save.line'
2241 2239
2242 2240 def qrepo(self, create=False):
2243 2241 ui = self.baseui.copy()
2244 2242 # copy back attributes set by ui.pager()
2245 2243 if self.ui.pageractive and not ui.pageractive:
2246 2244 ui.pageractive = self.ui.pageractive
2247 2245 # internal config: ui.formatted
2248 2246 ui.setconfig(
2249 2247 b'ui',
2250 2248 b'formatted',
2251 2249 self.ui.config(b'ui', b'formatted'),
2252 2250 b'mqpager',
2253 2251 )
2254 2252 ui.setconfig(
2255 2253 b'ui',
2256 2254 b'interactive',
2257 2255 self.ui.config(b'ui', b'interactive'),
2258 2256 b'mqpager',
2259 2257 )
2260 2258 if create or os.path.isdir(self.join(b".hg")):
2261 2259 return hg.repository(ui, path=self.path, create=create)
2262 2260
2263 2261 def restore(self, repo, rev, delete=None, qupdate=None):
2264 2262 desc = repo[rev].description().strip()
2265 2263 lines = desc.splitlines()
2266 2264 datastart = None
2267 2265 series = []
2268 2266 applied = []
2269 2267 qpp = None
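# Parse the description of a save entry, which save() below writes
# roughly as:
#   <summary line>
#   Dirstate: <p1 hex> <p2 hex>
#   Patch Data:
#   <node hex>:<applied patch name>
#   :<unapplied series entry>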
2270 2268 for i, line in enumerate(lines):
2271 2269 if line == b'Patch Data:':
2272 2270 datastart = i + 1
2273 2271 elif line.startswith(b'Dirstate:'):
2274 2272 l = line.rstrip()
2275 2273 l = l[10:].split(b' ')
2276 2274 qpp = [bin(x) for x in l]
2277 2275 elif datastart is not None:
2278 2276 l = line.rstrip()
2279 2277 n, name = l.split(b':', 1)
2280 2278 if n:
2281 2279 applied.append(statusentry(bin(n), name))
2282 2280 else:
2283 2281 series.append(l)
2284 2282 if datastart is None:
2285 2283 self.ui.warn(_(b"no saved patch data found\n"))
2286 2284 return 1
2287 2285 self.ui.warn(_(b"restoring status: %s\n") % lines[0])
2288 2286 self.fullseries = series
2289 2287 self.applied = applied
2290 2288 self.parseseries()
2291 2289 self.seriesdirty = True
2292 2290 self.applieddirty = True
2293 2291 heads = repo.changelog.heads()
2294 2292 if delete:
2295 2293 if rev not in heads:
2296 2294 self.ui.warn(_(b"save entry has children, leaving it alone\n"))
2297 2295 else:
2298 2296 self.ui.warn(_(b"removing save entry %s\n") % short(rev))
2299 2297 pp = repo.dirstate.parents()
2300 2298 if rev in pp:
2301 2299 update = True
2302 2300 else:
2303 2301 update = False
2304 2302 strip(self.ui, repo, [rev], update=update, backup=False)
2305 2303 if qpp:
2306 2304 self.ui.warn(
2307 2305 _(b"saved queue repository parents: %s %s\n")
2308 2306 % (short(qpp[0]), short(qpp[1]))
2309 2307 )
2310 2308 if qupdate:
2311 2309 self.ui.status(_(b"updating queue directory\n"))
2312 2310 r = self.qrepo()
2313 2311 if not r:
2314 2312 self.ui.warn(_(b"unable to load queue repository\n"))
2315 2313 return 1
2316 2314 hg.clean(r, qpp[0])
2317 2315
2318 2316 def save(self, repo, msg=None):
2319 2317 if not self.applied:
2320 2318 self.ui.warn(_(b"save: no patches applied, exiting\n"))
2321 2319 return 1
2322 2320 if self.issaveline(self.applied[-1]):
2323 2321 self.ui.warn(_(b"status is already saved\n"))
2324 2322 return 1
2325 2323
2326 2324 if not msg:
2327 2325 msg = _(b"hg patches saved state")
2328 2326 else:
2329 2327 msg = b"hg patches: " + msg.rstrip(b'\r\n')
2330 2328 r = self.qrepo()
2331 2329 if r:
2332 2330 pp = r.dirstate.parents()
2333 2331 msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
2334 2332 msg += b"\n\nPatch Data:\n"
2335 2333 msg += b''.join(b'%s\n' % x for x in self.applied)
2336 2334 msg += b''.join(b':%s\n' % x for x in self.fullseries)
2337 2335 n = repo.commit(msg, force=True)
2338 2336 if not n:
2339 2337 self.ui.warn(_(b"repo commit failed\n"))
2340 2338 return 1
2341 2339 self.applied.append(statusentry(n, b'.hg.patches.save.line'))
2342 2340 self.applieddirty = True
2343 2341 self.removeundo(repo)
2344 2342
2345 2343 def fullseriesend(self):
2346 2344 if self.applied:
2347 2345 p = self.applied[-1].name
2348 2346 end = self.findseries(p)
2349 2347 if end is None:
2350 2348 return len(self.fullseries)
2351 2349 return end + 1
2352 2350 return 0
2353 2351
2354 2352 def seriesend(self, all_patches=False):
2355 2353 """If all_patches is False, return the index of the next pushable patch
2356 2354 in the series, or the series length. If all_patches is True, return the
2357 2355 index of the first patch past the last applied one.
2358 2356 """
2359 2357 end = 0
2360 2358
2361 2359 def nextpatch(start):
2362 2360 if all_patches or start >= len(self.series):
2363 2361 return start
2364 2362 for i in range(start, len(self.series)):
2365 2363 p, reason = self.pushable(i)
2366 2364 if p:
2367 2365 return i
2368 2366 self.explainpushable(i)
2369 2367 return len(self.series)
2370 2368
2371 2369 if self.applied:
2372 2370 p = self.applied[-1].name
2373 2371 try:
2374 2372 end = self.series.index(p)
2375 2373 except ValueError:
2376 2374 return 0
2377 2375 return nextpatch(end + 1)
2378 2376 return nextpatch(end)
2379 2377
2380 2378 def appliedname(self, index):
2381 2379 pname = self.applied[index].name
2382 2380 if not self.ui.verbose:
2383 2381 p = pname
2384 2382 else:
2385 2383 p = (b"%d" % self.series.index(pname)) + b" " + pname
2386 2384 return p
2387 2385
2388 2386 def qimport(
2389 2387 self,
2390 2388 repo,
2391 2389 files,
2392 2390 patchname=None,
2393 2391 rev=None,
2394 2392 existing=None,
2395 2393 force=None,
2396 2394 git=False,
2397 2395 ):
2398 2396 def checkseries(patchname):
2399 2397 if patchname in self.series:
2400 2398 raise error.Abort(
2401 2399 _(b'patch %s is already in the series file') % patchname
2402 2400 )
2403 2401
2404 2402 if rev:
2405 2403 if files:
2406 2404 raise error.Abort(
2407 2405 _(b'option "-r" not valid when importing files')
2408 2406 )
2409 2407 rev = logcmdutil.revrange(repo, rev)
2410 2408 rev.sort(reverse=True)
2411 2409 elif not files:
2412 2410 raise error.Abort(_(b'no files or revisions specified'))
2413 2411 if (len(files) > 1 or len(rev or []) > 1) and patchname:
2414 2412 raise error.Abort(
2415 2413 _(b'option "-n" not valid when importing multiple patches')
2416 2414 )
2417 2415 imported = []
2418 2416 if rev:
2419 2417 # If mq patches are applied, we can only import revisions
2420 2418 # that form a linear path to qbase.
2421 2419 # Otherwise, they should form a linear path to a head.
2422 2420 heads = repo.changelog.heads(repo.changelog.node(rev.first()))
2423 2421 if len(heads) > 1:
2424 2422 raise error.Abort(
2425 2423 _(b'revision %d is the root of more than one branch')
2426 2424 % rev.last()
2427 2425 )
2428 2426 if self.applied:
2429 2427 base = repo.changelog.node(rev.first())
2430 2428 if base in [n.node for n in self.applied]:
2431 2429 raise error.Abort(
2432 2430 _(b'revision %d is already managed') % rev.first()
2433 2431 )
2434 2432 if heads != [self.applied[-1].node]:
2435 2433 raise error.Abort(
2436 2434 _(b'revision %d is not the parent of the queue')
2437 2435 % rev.first()
2438 2436 )
2439 2437 base = repo.changelog.rev(self.applied[0].node)
2440 2438 lastparent = repo.changelog.parentrevs(base)[0]
2441 2439 else:
2442 2440 if heads != [repo.changelog.node(rev.first())]:
2443 2441 raise error.Abort(
2444 2442 _(b'revision %d has unmanaged children') % rev.first()
2445 2443 )
2446 2444 lastparent = None
2447 2445
2448 2446 diffopts = self.diffopts({b'git': git})
2449 2447 with repo.transaction(b'qimport') as tr:
2450 2448 for r in rev:
2451 2449 if not repo[r].mutable():
2452 2450 raise error.Abort(
2453 2451 _(b'revision %d is not mutable') % r,
2454 2452 hint=_(b"see 'hg help phases' " b'for details'),
2455 2453 )
2456 2454 p1, p2 = repo.changelog.parentrevs(r)
2457 2455 n = repo.changelog.node(r)
2458 2456 if p2 != nullrev:
2459 2457 raise error.Abort(
2460 2458 _(b'cannot import merge revision %d') % r
2461 2459 )
2462 2460 if lastparent and lastparent != r:
2463 2461 raise error.Abort(
2464 2462 _(b'revision %d is not the parent of %d')
2465 2463 % (r, lastparent)
2466 2464 )
2467 2465 lastparent = p1
2468 2466
2469 2467 if not patchname:
2470 2468 patchname = self.makepatchname(
2471 2469 repo[r].description().split(b'\n', 1)[0],
2472 2470 b'%d.diff' % r,
2473 2471 )
2474 2472 checkseries(patchname)
2475 2473 self.checkpatchname(patchname, force)
2476 2474 self.fullseries.insert(0, patchname)
2477 2475
2478 2476 with self.opener(patchname, b"w") as fp:
2479 2477 cmdutil.exportfile(repo, [n], fp, opts=diffopts)
2480 2478
2481 2479 se = statusentry(n, patchname)
2482 2480 self.applied.insert(0, se)
2483 2481
2484 2482 self.added.append(patchname)
2485 2483 imported.append(patchname)
2486 2484 patchname = None
2487 2485 if rev and repo.ui.configbool(b'mq', b'secret'):
2488 2486 # if we added anything with --rev, move the secret root
2489 2487 phases.retractboundary(repo, tr, phases.secret, [n])
2490 2488 self.parseseries()
2491 2489 self.applieddirty = True
2492 2490 self.seriesdirty = True
2493 2491
2494 2492 for i, filename in enumerate(files):
2495 2493 if existing:
2496 2494 if filename == b'-':
2497 2495 raise error.Abort(
2498 2496 _(b'-e is incompatible with import from -')
2499 2497 )
2500 2498 filename = normname(filename)
2501 2499 self.checkreservedname(filename)
2502 2500 if urlutil.url(filename).islocal():
2503 2501 originpath = self.join(filename)
2504 2502 if not os.path.isfile(originpath):
2505 2503 raise error.Abort(
2506 2504 _(b"patch %s does not exist") % filename
2507 2505 )
2508 2506
2509 2507 if patchname:
2510 2508 self.checkpatchname(patchname, force)
2511 2509
2512 2510 self.ui.write(
2513 2511 _(b'renaming %s to %s\n') % (filename, patchname)
2514 2512 )
2515 2513 util.rename(originpath, self.join(patchname))
2516 2514 else:
2517 2515 patchname = filename
2518 2516
2519 2517 else:
2520 2518 if filename == b'-' and not patchname:
2521 2519 raise error.Abort(
2522 2520 _(b'need --name to import a patch from -')
2523 2521 )
2524 2522 elif not patchname:
2525 2523 patchname = normname(
2526 2524 os.path.basename(filename.rstrip(b'/'))
2527 2525 )
2528 2526 self.checkpatchname(patchname, force)
2529 2527 try:
2530 2528 if filename == b'-':
2531 2529 text = self.ui.fin.read()
2532 2530 else:
2533 2531 fp = hg.openpath(self.ui, filename)
2534 2532 text = fp.read()
2535 2533 fp.close()
2536 2534 except OSError:
2537 2535 raise error.Abort(_(b"unable to read file %s") % filename)
2538 2536 patchf = self.opener(patchname, b"w")
2539 2537 patchf.write(text)
2540 2538 patchf.close()
2541 2539 if not force:
2542 2540 checkseries(patchname)
2543 2541 if patchname not in self.series:
2544 2542 index = self.fullseriesend() + i
2545 2543 self.fullseries[index:index] = [patchname]
2546 2544 self.parseseries()
2547 2545 self.seriesdirty = True
2548 2546 self.ui.warn(_(b"adding %s to series file\n") % patchname)
2549 2547 self.added.append(patchname)
2550 2548 imported.append(patchname)
2551 2549 patchname = None
2552 2550
2553 2551 self.removeundo(repo)
2554 2552 return imported
2555 2553
2556 2554
2557 2555 def fixkeepchangesopts(ui, opts):
2558 2556 if (
2559 2557 not ui.configbool(b'mq', b'keepchanges')
2560 2558 or opts.get(b'force')
2561 2559 or opts.get(b'exact')
2562 2560 ):
2563 2561 return opts
2564 2562 opts = dict(opts)
2565 2563 opts[b'keep_changes'] = True
2566 2564 return opts
2567 2565
2568 2566
2569 2567 @command(
2570 2568 b"qdelete|qremove|qrm",
2571 2569 [
2572 2570 (b'k', b'keep', None, _(b'keep patch file')),
2573 2571 (
2574 2572 b'r',
2575 2573 b'rev',
2576 2574 [],
2577 2575 _(b'stop managing a revision (DEPRECATED)'),
2578 2576 _(b'REV'),
2579 2577 ),
2580 2578 ],
2581 2579 _(b'hg qdelete [-k] [PATCH]...'),
2582 2580 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2583 2581 )
2584 2582 def delete(ui, repo, *patches, **opts):
2585 2583 """remove patches from queue
2586 2584
2587 2585 The patches must not be applied, and at least one patch is required. Exact
2588 2586 patch identifiers must be given. With -k/--keep, the patch files are
2589 2587 preserved in the patch directory.
2590 2588
2591 2589 To stop managing a patch and move it into permanent history,
2592 2590 use the :hg:`qfinish` command."""
2593 2591 q = repo.mq
2594 2592 q.delete(repo, patches, pycompat.byteskwargs(opts))
2595 2593 q.savedirty()
2596 2594 return 0
2597 2595
2598 2596
2599 2597 @command(
2600 2598 b"qapplied",
2601 2599 [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
2602 2600 + seriesopts,
2603 2601 _(b'hg qapplied [-1] [-s] [PATCH]'),
2604 2602 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2605 2603 )
2606 2604 def applied(ui, repo, patch=None, **opts):
2607 2605 """print the patches already applied
2608 2606
2609 2607 Returns 0 on success."""
2610 2608
2611 2609 q = repo.mq
2612 2610 opts = pycompat.byteskwargs(opts)
2613 2611
2614 2612 if patch:
2615 2613 if patch not in q.series:
2616 2614 raise error.Abort(_(b"patch %s is not in series file") % patch)
2617 2615 end = q.series.index(patch) + 1
2618 2616 else:
2619 2617 end = q.seriesend(True)
2620 2618
2621 2619 if opts.get(b'last') and not end:
2622 2620 ui.write(_(b"no patches applied\n"))
2623 2621 return 1
2624 2622 elif opts.get(b'last') and end == 1:
2625 2623 ui.write(_(b"only one patch applied\n"))
2626 2624 return 1
2627 2625 elif opts.get(b'last'):
2628 2626 start = end - 2
2629 2627 end = 1
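# --last: show only the patch applied just before the current top;
# note that qseries() below is called with 'end' reused as the length.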
2630 2628 else:
2631 2629 start = 0
2632 2630
2633 2631 q.qseries(
2634 2632 repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
2635 2633 )
2636 2634
2637 2635
2638 2636 @command(
2639 2637 b"qunapplied",
2640 2638 [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
2641 2639 _(b'hg qunapplied [-1] [-s] [PATCH]'),
2642 2640 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2643 2641 )
2644 2642 def unapplied(ui, repo, patch=None, **opts):
2645 2643 """print the patches not yet applied
2646 2644
2647 2645 Returns 0 on success."""
2648 2646
2649 2647 q = repo.mq
2650 2648 opts = pycompat.byteskwargs(opts)
2651 2649 if patch:
2652 2650 if patch not in q.series:
2653 2651 raise error.Abort(_(b"patch %s is not in series file") % patch)
2654 2652 start = q.series.index(patch) + 1
2655 2653 else:
2656 2654 start = q.seriesend(True)
2657 2655
2658 2656 if start == len(q.series) and opts.get(b'first'):
2659 2657 ui.write(_(b"all patches applied\n"))
2660 2658 return 1
2661 2659
2662 2660 if opts.get(b'first'):
2663 2661 length = 1
2664 2662 else:
2665 2663 length = None
2666 2664 q.qseries(
2667 2665 repo,
2668 2666 start=start,
2669 2667 length=length,
2670 2668 status=b'U',
2671 2669 summary=opts.get(b'summary'),
2672 2670 )
2673 2671
2674 2672
2675 2673 @command(
2676 2674 b"qimport",
2677 2675 [
2678 2676 (b'e', b'existing', None, _(b'import file in patch directory')),
2679 2677 (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
2680 2678 (b'f', b'force', None, _(b'overwrite existing files')),
2681 2679 (
2682 2680 b'r',
2683 2681 b'rev',
2684 2682 [],
2685 2683 _(b'place existing revisions under mq control'),
2686 2684 _(b'REV'),
2687 2685 ),
2688 2686 (b'g', b'git', None, _(b'use git extended diff format')),
2689 2687 (b'P', b'push', None, _(b'qpush after importing')),
2690 2688 ],
2691 2689 _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
2692 2690 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2693 2691 )
2694 2692 def qimport(ui, repo, *filename, **opts):
2695 2693 """import a patch or existing changeset
2696 2694
2697 2695 The patch is inserted into the series after the last applied
2698 2696 patch. If no patches have been applied, qimport prepends the patch
2699 2697 to the series.
2700 2698
2701 2699 The patch will have the same name as its source file unless you
2702 2700 give it a new one with -n/--name.
2703 2701
2704 2702 You can register an existing patch inside the patch directory with
2705 2703 the -e/--existing flag.
2706 2704
2707 2705 With -f/--force, an existing patch of the same name will be
2708 2706 overwritten.
2709 2707
2710 2708 An existing changeset may be placed under mq control with -r/--rev
2711 2709 (e.g. qimport --rev . -n patch will place the current revision
2712 2710 under mq control). With -g/--git, patches imported with --rev will
2713 2711 use the git diff format. See the diffs help topic for information
2714 2712 on why this is important for preserving rename/copy information
2715 2713 and permission changes. Use :hg:`qfinish` to remove changesets
2716 2714 from mq control.
2717 2715
2718 2716 To import a patch from standard input, pass - as the patch file.
2719 2717 When importing from standard input, a patch name must be specified
2720 2718 using the --name flag.
2721 2719
2722 2720 To import an existing patch while renaming it::
2723 2721
2724 2722 hg qimport -e existing-patch -n new-name
2725 2723
2726 2724 Returns 0 if import succeeded.
2727 2725 """
2728 2726 opts = pycompat.byteskwargs(opts)
2729 2727 with repo.lock(): # because this may move phase boundaries
2730 2728 q = repo.mq
2731 2729 try:
2732 2730 imported = q.qimport(
2733 2731 repo,
2734 2732 filename,
2735 2733 patchname=opts.get(b'name'),
2736 2734 existing=opts.get(b'existing'),
2737 2735 force=opts.get(b'force'),
2738 2736 rev=opts.get(b'rev'),
2739 2737 git=opts.get(b'git'),
2740 2738 )
2741 2739 finally:
2742 2740 q.savedirty()
2743 2741
2744 2742 if imported and opts.get(b'push') and not opts.get(b'rev'):
2745 2743 return q.push(repo, imported[-1])
2746 2744 return 0
2747 2745
2748 2746
2749 2747 def qinit(ui, repo, create):
2750 2748 """initialize a new queue repository
2751 2749
2752 2750 This command also creates a series file for ordering patches, and
2753 2751 an mq-specific .hgignore file in the queue repository, to exclude
2754 2752 the status and guards files (these contain mostly transient state).
2755 2753
2756 2754 Returns 0 if initialization succeeded."""
2757 2755 q = repo.mq
2758 2756 r = q.init(repo, create)
2759 2757 q.savedirty()
2760 2758 if r:
2761 2759 with r.wlock(), r.dirstate.changing_files(r):
2762 2760 if not os.path.exists(r.wjoin(b'.hgignore')):
2763 2761 fp = r.wvfs(b'.hgignore', b'w')
2764 2762 fp.write(b'^\\.hg\n')
2765 2763 fp.write(b'^\\.mq\n')
2766 2764 fp.write(b'syntax: glob\n')
2767 2765 fp.write(b'status\n')
2768 2766 fp.write(b'guards\n')
2769 2767 fp.close()
2770 2768 if not os.path.exists(r.wjoin(b'series')):
2771 2769 r.wvfs(b'series', b'w').close()
2772 2770 r[None].add([b'.hgignore', b'series'])
2773 2771 commands.add(ui, r)
2774 2772 return 0
2775 2773
2776 2774
2777 2775 @command(
2778 2776 b"qinit",
2779 2777 [(b'c', b'create-repo', None, _(b'create queue repository'))],
2780 2778 _(b'hg qinit [-c]'),
2781 2779 helpcategory=command.CATEGORY_REPO_CREATION,
2782 2780 helpbasic=True,
2783 2781 )
2784 2782 def init(ui, repo, **opts):
2785 2783 """init a new queue repository (DEPRECATED)
2786 2784
2787 2785 The queue repository is unversioned by default. If
2788 2786 -c/--create-repo is specified, qinit will create a separate nested
2789 2787 repository for patches (qinit -c may also be run later to convert
2790 2788 an unversioned patch repository into a versioned one). You can use
2791 2789 qcommit to commit changes to this queue repository.
2792 2790
2793 2791 This command is deprecated. Without -c, it's implied by other relevant
2794 2792 commands. With -c, use :hg:`init --mq` instead."""
2795 2793 return qinit(ui, repo, create=opts.get('create_repo'))
2796 2794
2797 2795
2798 2796 @command(
2799 2797 b"qclone",
2800 2798 [
2801 2799 (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
2802 2800 (
2803 2801 b'U',
2804 2802 b'noupdate',
2805 2803 None,
2806 2804 _(b'do not update the new working directories'),
2807 2805 ),
2808 2806 (
2809 2807 b'',
2810 2808 b'uncompressed',
2811 2809 None,
2812 2810 _(b'use uncompressed transfer (fast over LAN)'),
2813 2811 ),
2814 2812 (
2815 2813 b'p',
2816 2814 b'patches',
2817 2815 b'',
2818 2816 _(b'location of source patch repository'),
2819 2817 _(b'REPO'),
2820 2818 ),
2821 2819 ]
2822 2820 + cmdutil.remoteopts,
2823 2821 _(b'hg qclone [OPTION]... SOURCE [DEST]'),
2824 2822 helpcategory=command.CATEGORY_REPO_CREATION,
2825 2823 norepo=True,
2826 2824 )
2827 2825 def clone(ui, source, dest=None, **opts):
2828 2826 """clone main and patch repository at same time
2829 2827
2830 2828 If the source is local, the destination will have no patches
2831 2829 applied. If the source is remote, this command cannot check
2832 2830 whether patches are applied in the source, so it cannot guarantee
2833 2831 that patches are not applied in the destination. If you clone a
2834 2832 remote repository, make sure beforehand that it has no patches applied.
2835 2833
2836 2834 Source patch repository is looked for in <src>/.hg/patches by
2837 2835 default. Use -p <url> to change.
2838 2836
2839 2837 The patch directory must be a nested Mercurial repository, as
2840 2838 would be created by :hg:`init --mq`.
2841 2839
2842 2840 Return 0 on success.
2843 2841 """
2844 2842 opts = pycompat.byteskwargs(opts)
2845 2843
2846 2844 def patchdir(repo):
2847 2845 """compute a patch repo url from a repo object"""
2848 2846 url = repo.url()
2849 2847 if url.endswith(b'/'):
2850 2848 url = url[:-1]
2851 2849 return url + b'/.hg/patches'
2852 2850
2853 2851 # main repo (destination and sources)
2854 2852 if dest is None:
2855 2853 dest = hg.defaultdest(source)
2856 2854 source_path = urlutil.get_clone_path_obj(ui, source)
2857 2855 sr = hg.peer(ui, opts, source_path)
2858 2856
2859 2857 # patches repo (source only)
2860 2858 if opts.get(b'patches'):
2861 2859 patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
2862 2860 else:
2863 2861 # XXX path: we should turn this into a path object
2864 2862 patches_path = patchdir(sr)
2865 2863 try:
2866 2864 hg.peer(ui, opts, patches_path)
2867 2865 except error.RepoError:
2868 2866 raise error.Abort(
2869 2867 _(b'versioned patch repository not found (see init --mq)')
2870 2868 )
2871 2869 qbase, destrev = None, None
2872 2870 if sr.local():
2873 2871 repo = sr.local()
2874 2872 if repo.mq.applied and repo[repo.mq.applied[0].node].phase() != phases.secret:
2875 2873 qbase = repo.mq.applied[0].node
2876 2874 if not hg.islocal(dest):
2877 2875 heads = set(repo.heads())
2878 2876 destrev = list(heads.difference(repo.heads(qbase)))
2879 2877 destrev.append(repo.changelog.parents(qbase)[0])
2880 2878 elif sr.capable(b'lookup'):
2881 2879 try:
2882 2880 qbase = sr.lookup(b'qbase')
2883 2881 except error.RepoError:
2884 2882 pass
2885 2883
2886 2884 ui.note(_(b'cloning main repository\n'))
2887 2885 sr, dr = hg.clone(
2888 2886 ui,
2889 2887 opts,
2890 2888 sr.url(),
2891 2889 dest,
2892 2890 pull=opts.get(b'pull'),
2893 2891 revs=destrev,
2894 2892 update=False,
2895 2893 stream=opts.get(b'uncompressed'),
2896 2894 )
2897 2895
2898 2896 ui.note(_(b'cloning patch repository\n'))
2899 2897 hg.clone(
2900 2898 ui,
2901 2899 opts,
2902 2900 opts.get(b'patches') or patchdir(sr),
2903 2901 patchdir(dr),
2904 2902 pull=opts.get(b'pull'),
2905 2903 update=not opts.get(b'noupdate'),
2906 2904 stream=opts.get(b'uncompressed'),
2907 2905 )
2908 2906
2909 2907 if dr.local():
2910 2908 repo = dr.local()
2911 2909 if qbase:
2912 2910 ui.note(
2913 2911 _(
2914 2912 b'stripping applied patches from destination '
2915 2913 b'repository\n'
2916 2914 )
2917 2915 )
2918 2916 strip(ui, repo, [qbase], update=False, backup=None)
2919 2917 if not opts.get(b'noupdate'):
2920 2918 ui.note(_(b'updating destination repository\n'))
2921 2919 hg.update(repo, repo.changelog.tip())
2922 2920
2923 2921
2924 2922 @command(
2925 2923 b"qcommit|qci",
2926 2924 commands.table[b"commit|ci"][1],
2927 2925 _(b'hg qcommit [OPTION]... [FILE]...'),
2928 2926 helpcategory=command.CATEGORY_COMMITTING,
2929 2927 inferrepo=True,
2930 2928 )
2931 2929 def commit(ui, repo, *pats, **opts):
2932 2930 """commit changes in the queue repository (DEPRECATED)
2933 2931
2934 2932 This command is deprecated; use :hg:`commit --mq` instead."""
2935 2933 q = repo.mq
2936 2934 r = q.qrepo()
2937 2935 if not r:
2938 2936 raise error.Abort(b'no queue repository')
2939 2937 commands.commit(r.ui, r, *pats, **opts)
2940 2938
2941 2939
2942 2940 @command(
2943 2941 b"qseries",
2944 2942 [
2945 2943 (b'm', b'missing', None, _(b'print patches not in series')),
2946 2944 ]
2947 2945 + seriesopts,
2948 2946 _(b'hg qseries [-ms]'),
2949 2947 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2950 2948 )
2951 2949 def series(ui, repo, **opts):
2952 2950 """print the entire series file
2953 2951
2954 2952 Returns 0 on success."""
2955 2953 repo.mq.qseries(
2956 2954 repo, missing=opts.get('missing'), summary=opts.get('summary')
2957 2955 )
2958 2956 return 0
2959 2957
2960 2958
2961 2959 @command(
2962 2960 b"qtop",
2963 2961 seriesopts,
2964 2962 _(b'hg qtop [-s]'),
2965 2963 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2966 2964 )
2967 2965 def top(ui, repo, **opts):
2968 2966 """print the name of the current patch
2969 2967
2970 2968 Returns 0 on success."""
2971 2969 q = repo.mq
2972 2970 if q.applied:
2973 2971 t = q.seriesend(True)
2974 2972 else:
2975 2973 t = 0
2976 2974
2977 2975 if t:
2978 2976 q.qseries(
2979 2977 repo,
2980 2978 start=t - 1,
2981 2979 length=1,
2982 2980 status=b'A',
2983 2981 summary=opts.get('summary'),
2984 2982 )
2985 2983 else:
2986 2984 ui.write(_(b"no patches applied\n"))
2987 2985 return 1
2988 2986
2989 2987
2990 2988 @command(
2991 2989 b"qnext",
2992 2990 seriesopts,
2993 2991 _(b'hg qnext [-s]'),
2994 2992 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
2995 2993 )
2996 2994 def next(ui, repo, **opts):
2997 2995 """print the name of the next pushable patch
2998 2996
2999 2997 Returns 0 on success."""
3000 2998 q = repo.mq
3001 2999 end = q.seriesend()
3002 3000 if end == len(q.series):
3003 3001 ui.write(_(b"all patches applied\n"))
3004 3002 return 1
3005 3003 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
3006 3004
3007 3005
3008 3006 @command(
3009 3007 b"qprev",
3010 3008 seriesopts,
3011 3009 _(b'hg qprev [-s]'),
3012 3010 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3013 3011 )
3014 3012 def prev(ui, repo, **opts):
3015 3013 """print the name of the preceding applied patch
3016 3014
3017 3015 Returns 0 on success."""
3018 3016 q = repo.mq
3019 3017 l = len(q.applied)
3020 3018 if l == 1:
3021 3019 ui.write(_(b"only one patch applied\n"))
3022 3020 return 1
3023 3021 if not l:
3024 3022 ui.write(_(b"no patches applied\n"))
3025 3023 return 1
3026 3024 idx = q.series.index(q.applied[-2].name)
3027 3025 q.qseries(
3028 3026 repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
3029 3027 )
3030 3028
3031 3029
3032 3030 def setupheaderopts(ui, opts):
3033 3031 if not opts.get(b'user') and opts.get(b'currentuser'):
3034 3032 opts[b'user'] = ui.username()
3035 3033 if not opts.get(b'date') and opts.get(b'currentdate'):
3036 3034 opts[b'date'] = b"%d %d" % dateutil.makedate()
3037 3035
3038 3036
3039 3037 @command(
3040 3038 b"qnew",
3041 3039 [
3042 3040 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3043 3041 (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
3044 3042 (b'g', b'git', None, _(b'use git extended diff format')),
3045 3043 (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
3046 3044 (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
3047 3045 (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
3048 3046 (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
3049 3047 ]
3050 3048 + cmdutil.walkopts
3051 3049 + cmdutil.commitopts,
3052 3050 _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
3053 3051 helpcategory=command.CATEGORY_COMMITTING,
3054 3052 helpbasic=True,
3055 3053 inferrepo=True,
3056 3054 )
3057 3055 def new(ui, repo, patch, *args, **opts):
3058 3056 """create a new patch
3059 3057
3060 3058 qnew creates a new patch on top of the currently-applied patch (if
3061 3059 any). The patch will be initialized with any outstanding changes
3062 3060 in the working directory. You may also use -I/--include,
3063 3061 -X/--exclude, and/or a list of files after the patch name to add
3064 3062 only changes to matching files to the new patch, leaving the rest
3065 3063 as uncommitted modifications.
3066 3064
3067 3065 -u/--user and -d/--date can be used to set the (given) user and
3068 3066 date, respectively. -U/--currentuser and -D/--currentdate set user
3069 3067 to current user and date to current date.
3070 3068
3071 3069 -e/--edit, -m/--message or -l/--logfile set the patch header as
3072 3070 well as the commit message. If none is specified, the header is
3073 3071 empty and the commit message is '[mq]: PATCH'.
3074 3072
3075 3073 Use the -g/--git option to keep the patch in the git extended diff
3076 3074 format. Read the diffs help topic for more information on why this
3077 3075 is important for preserving permission changes and copy/rename
3078 3076 information.
3079 3077
3080 3078 Returns 0 on successful creation of a new patch.
3081 3079 """
3082 3080 opts = pycompat.byteskwargs(opts)
3083 3081 msg = cmdutil.logmessage(ui, opts)
3084 3082 q = repo.mq
3085 3083 opts[b'msg'] = msg
3086 3084 setupheaderopts(ui, opts)
3087 3085 q.new(repo, patch, *args, **pycompat.strkwargs(opts))
3088 3086 q.savedirty()
3089 3087 return 0
3090 3088
3091 3089
3092 3090 @command(
3093 3091 b"qrefresh",
3094 3092 [
3095 3093 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3096 3094 (b'g', b'git', None, _(b'use git extended diff format')),
3097 3095 (
3098 3096 b's',
3099 3097 b'short',
3100 3098 None,
3101 3099 _(b'refresh only files already in the patch and specified files'),
3102 3100 ),
3103 3101 (
3104 3102 b'U',
3105 3103 b'currentuser',
3106 3104 None,
3107 3105 _(b'add/update author field in patch with current user'),
3108 3106 ),
3109 3107 (
3110 3108 b'u',
3111 3109 b'user',
3112 3110 b'',
3113 3111 _(b'add/update author field in patch with given user'),
3114 3112 _(b'USER'),
3115 3113 ),
3116 3114 (
3117 3115 b'D',
3118 3116 b'currentdate',
3119 3117 None,
3120 3118 _(b'add/update date field in patch with current date'),
3121 3119 ),
3122 3120 (
3123 3121 b'd',
3124 3122 b'date',
3125 3123 b'',
3126 3124 _(b'add/update date field in patch with given date'),
3127 3125 _(b'DATE'),
3128 3126 ),
3129 3127 ]
3130 3128 + cmdutil.walkopts
3131 3129 + cmdutil.commitopts,
3132 3130 _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
3133 3131 helpcategory=command.CATEGORY_COMMITTING,
3134 3132 helpbasic=True,
3135 3133 inferrepo=True,
3136 3134 )
3137 3135 def refresh(ui, repo, *pats, **opts):
3138 3136 """update the current patch
3139 3137
3140 3138 If any file patterns are provided, the refreshed patch will
3141 3139 contain only the modifications that match those patterns; the
3142 3140 remaining modifications will remain in the working directory.
3143 3141
3144 3142 If -s/--short is specified, files currently included in the patch
3145 3143 will be refreshed just like matched files and remain in the patch.
3146 3144
3147 3145 If -e/--edit is specified, Mercurial will start your configured editor for
3148 3146 you to enter a message. In case qrefresh fails, you will find a backup of
3149 3147 your message in ``.hg/last-message.txt``.
3150 3148
3151 3149 hg add/remove/copy/rename work as usual, though you might want to
3152 3150 use git-style patches (-g/--git or [diff] git=1) to track copies
3153 3151 and renames. See the diffs help topic for more information on the
3154 3152 git diff format.
3155 3153
3156 3154 Returns 0 on success.
3157 3155 """
3158 3156 opts = pycompat.byteskwargs(opts)
3159 3157 q = repo.mq
3160 3158 message = cmdutil.logmessage(ui, opts)
3161 3159 setupheaderopts(ui, opts)
3162 3160 with repo.wlock():
3163 3161 ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
3164 3162 q.savedirty()
3165 3163 return ret
3166 3164
3167 3165
3168 3166 @command(
3169 3167 b"qdiff",
3170 3168 cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
3171 3169 _(b'hg qdiff [OPTION]... [FILE]...'),
3172 3170 helpcategory=command.CATEGORY_FILE_CONTENTS,
3173 3171 helpbasic=True,
3174 3172 inferrepo=True,
3175 3173 )
3176 3174 def diff(ui, repo, *pats, **opts):
3177 3175 """diff of the current patch and subsequent modifications
3178 3176
3179 3177 Shows a diff which includes the current patch as well as any
3180 3178 changes which have been made in the working directory since the
3181 3179 last refresh (thus showing what the current patch would become
3182 3180 after a qrefresh).
3183 3181
3184 3182 Use :hg:`diff` if you only want to see the changes made since the
3185 3183 last qrefresh, or :hg:`export qtip` if you want to see changes
3186 3184 made by the current patch without including changes made since the
3187 3185 qrefresh.
3188 3186
3189 3187 Returns 0 on success.
3190 3188 """
3191 3189 ui.pager(b'qdiff')
3192 3190 repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
3193 3191 return 0
3194 3192
3195 3193
3196 3194 @command(
3197 3195 b'qfold',
3198 3196 [
3199 3197 (b'e', b'edit', None, _(b'invoke editor on commit messages')),
3200 3198 (b'k', b'keep', None, _(b'keep folded patch files')),
3201 3199 ]
3202 3200 + cmdutil.commitopts,
3203 3201 _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
3204 3202 helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
3205 3203 )
3206 3204 def fold(ui, repo, *files, **opts):
3207 3205 """fold the named patches into the current patch
3208 3206
3209 3207 Patches must not yet be applied. Each patch will be successively
3210 3208 applied to the current patch in the order given. If all the
3211 3209 patches apply successfully, the current patch will be refreshed
3212 3210 with the new cumulative patch, and the folded patches will be
3213 3211 deleted. With -k/--keep, the folded patch files will not be
3214 3212 removed afterwards.
3215 3213
3216 3214 The header for each folded patch will be concatenated with the
3217 3215 current patch header, separated by a line of ``* * *``.
3218 3216
3219 3217 Returns 0 on success."""
3220 3218 opts = pycompat.byteskwargs(opts)
3221 3219 q = repo.mq
3222 3220 if not files:
3223 3221 raise error.Abort(_(b'qfold requires at least one patch name'))
3224 3222 if not q.checktoppatch(repo)[0]:
3225 3223 raise error.Abort(_(b'no patches applied'))
3226 3224
3227 3225 with repo.wlock():
3228 3226 q.checklocalchanges(repo)
3229 3227
3230 3228 message = cmdutil.logmessage(ui, opts)
3231 3229
3232 3230 parent = q.lookup(b'qtip')
3233 3231 patches = []
3234 3232 messages = []
3235 3233 for f in files:
3236 3234 p = q.lookup(f)
2237 2235 if p in patches or p == parent:
2238 2236 ui.warn(_(b'skipping already folded patch %s\n') % p)
continue
3239 3237 if q.isapplied(p):
3240 3238 raise error.Abort(
3241 3239 _(b'qfold cannot fold already applied patch %s') % p
3242 3240 )
3243 3241 patches.append(p)
3244 3242
3245 3243 for p in patches:
3246 3244 if not message:
3247 3245 ph = patchheader(q.join(p), q.plainmode)
3248 3246 if ph.message:
3249 3247 messages.append(ph.message)
3250 3248 pf = q.join(p)
3251 3249 (patchsuccess, files, fuzz) = q.patch(repo, pf)
3252 3250 if not patchsuccess:
3253 3251 raise error.Abort(_(b'error folding patch %s') % p)
3254 3252
3255 3253 if not message:
3256 3254 ph = patchheader(q.join(parent), q.plainmode)
3257 3255 message = ph.message
3258 3256 for msg in messages:
3259 3257 if msg:
3260 3258 if message:
3261 3259 message.append(b'* * *')
3262 3260 message.extend(msg)
3263 3261 message = b'\n'.join(message)
3264 3262
3265 3263 diffopts = q.patchopts(q.diffopts(), *patches)
3266 3264 q.refresh(
3267 3265 repo,
3268 3266 msg=message,
3269 3267 git=diffopts.git,
3270 3268 edit=opts.get(b'edit'),
3271 3269 editform=b'mq.qfold',
3272 3270 )
3273 3271 q.delete(repo, patches, opts)
3274 3272 q.savedirty()
3275 3273
3276 3274
3277 3275 @command(
3278 3276 b"qgoto",
3279 3277 [
3280 3278 (
3281 3279 b'',
3282 3280 b'keep-changes',
3283 3281 None,
3284 3282 _(b'tolerate non-conflicting local changes'),
3285 3283 ),
3286 3284 (b'f', b'force', None, _(b'overwrite any local changes')),
3287 3285 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3288 3286 ],
3289 3287 _(b'hg qgoto [OPTION]... PATCH'),
3290 3288 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3291 3289 )
3292 3290 def goto(ui, repo, patch, **opts):
3293 3291 """push or pop patches until named patch is at top of stack
3294 3292
3295 3293 Returns 0 on success."""
3296 3294 opts = pycompat.byteskwargs(opts)
3297 3295 opts = fixkeepchangesopts(ui, opts)
3298 3296 q = repo.mq
3299 3297 patch = q.lookup(patch)
3300 3298 nobackup = opts.get(b'no_backup')
3301 3299 keepchanges = opts.get(b'keep_changes')
3302 3300 if q.isapplied(patch):
3303 3301 ret = q.pop(
3304 3302 repo,
3305 3303 patch,
3306 3304 force=opts.get(b'force'),
3307 3305 nobackup=nobackup,
3308 3306 keepchanges=keepchanges,
3309 3307 )
3310 3308 else:
3311 3309 ret = q.push(
3312 3310 repo,
3313 3311 patch,
3314 3312 force=opts.get(b'force'),
3315 3313 nobackup=nobackup,
3316 3314 keepchanges=keepchanges,
3317 3315 )
3318 3316 q.savedirty()
3319 3317 return ret
3320 3318
3321 3319
3322 3320 @command(
3323 3321 b"qguard",
3324 3322 [
3325 3323 (b'l', b'list', None, _(b'list all patches and guards')),
3326 3324 (b'n', b'none', None, _(b'drop all guards')),
3327 3325 ],
3328 3326 _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
3329 3327 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3330 3328 )
3331 3329 def guard(ui, repo, *args, **opts):
3332 3330 """set or print guards for a patch
3333 3331
3334 3332 Guards control whether a patch can be pushed. A patch with no
3335 3333 guards is always pushed. A patch with a positive guard ("+foo") is
3336 3334 pushed only if the :hg:`qselect` command has activated it. A patch with
3337 3335 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
3338 3336 has activated it.
3339 3337
3340 3338 With no arguments, print the currently active guards.
3341 3339 With arguments, set guards for the named patch.
3342 3340
3343 3341 .. note::
3344 3342
3345 3343 Specifying negative guards now requires '--'.
3346 3344
3347 3345 To set guards on another patch::
3348 3346
3349 3347 hg qguard other.patch -- +2.6.17 -stable
3350 3348
3351 3349 Returns 0 on success.
3352 3350 """
3353 3351
3354 3352 def status(idx):
3355 3353 guards = q.seriesguards[idx] or [b'unguarded']
3356 3354 if q.series[idx] in applied:
3357 3355 state = b'applied'
3358 3356 elif q.pushable(idx)[0]:
3359 3357 state = b'unapplied'
3360 3358 else:
3361 3359 state = b'guarded'
3362 3360 label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
3363 3361 ui.write(b'%s: ' % ui.label(q.series[idx], label))
3364 3362
3365 3363 for i, guard in enumerate(guards):
3366 3364 if guard.startswith(b'+'):
3367 3365 ui.write(guard, label=b'qguard.positive')
3368 3366 elif guard.startswith(b'-'):
3369 3367 ui.write(guard, label=b'qguard.negative')
3370 3368 else:
3371 3369 ui.write(guard, label=b'qguard.unguarded')
3372 3370 if i != len(guards) - 1:
3373 3371 ui.write(b' ')
3374 3372 ui.write(b'\n')
3375 3373
3376 3374 q = repo.mq
3377 3375 applied = {p.name for p in q.applied}
3378 3376 patch = None
3379 3377 args = list(args)
3380 3378 if opts.get('list'):
3381 3379 if args or opts.get('none'):
3382 3380 raise error.Abort(
3383 3381 _(b'cannot mix -l/--list with options or arguments')
3384 3382 )
3385 3383 for i in range(len(q.series)):
3386 3384 status(i)
3387 3385 return
3388 3386 if not args or args[0][0:1] in b'-+':
3389 3387 if not q.applied:
3390 3388 raise error.Abort(_(b'no patches applied'))
3391 3389 patch = q.applied[-1].name
3392 3390 if patch is None and args[0][0:1] not in b'-+':
3393 3391 patch = args.pop(0)
3394 3392 if patch is None:
3395 3393 raise error.Abort(_(b'no patch to work with'))
3396 3394 if args or opts.get('none'):
3397 3395 idx = q.findseries(patch)
3398 3396 if idx is None:
3399 3397 raise error.Abort(_(b'no patch named %s') % patch)
3400 3398 q.setguards(idx, args)
3401 3399 q.savedirty()
3402 3400 else:
3403 3401 status(q.series.index(q.lookup(patch)))
3404 3402
3405 3403
3406 3404 @command(
3407 3405 b"qheader",
3408 3406 [],
3409 3407 _(b'hg qheader [PATCH]'),
3410 3408 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3411 3409 )
3412 3410 def header(ui, repo, patch=None):
3413 3411 """print the header of the topmost or specified patch
3414 3412
3415 3413 Returns 0 on success."""
3416 3414 q = repo.mq
3417 3415
3418 3416 if patch:
3419 3417 patch = q.lookup(patch)
3420 3418 else:
3421 3419 if not q.applied:
3422 3420 ui.write(_(b'no patches applied\n'))
3423 3421 return 1
3424 3422 patch = q.lookup(b'qtip')
3425 3423 ph = patchheader(q.join(patch), q.plainmode)
3426 3424
3427 3425 ui.write(b'\n'.join(ph.message) + b'\n')
3428 3426
3429 3427
3430 3428 def lastsavename(path):
3431 3429 (directory, base) = os.path.split(path)
3432 3430 names = os.listdir(directory)
3433 3431 namere = re.compile(b"%s.([0-9]+)" % base)
3434 3432 maxindex = None
3435 3433 maxname = None
3436 3434 for f in names:
3437 3435 m = namere.match(f)
3438 3436 if m:
3439 3437 index = int(m.group(1))
3440 3438 if maxindex is None or index > maxindex:
3441 3439 maxindex = index
3442 3440 maxname = f
3443 3441 if maxname:
3444 3442 return (os.path.join(directory, maxname), maxindex)
3445 3443 return (None, None)
3446 3444
3447 3445
3448 3446 def savename(path):
3449 3447 (last, index) = lastsavename(path)
3450 3448 if last is None:
3451 3449 index = 0
3452 3450 newpath = path + b".%d" % (index + 1)
3453 3451 return newpath
3454 3452
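# Illustrative sketch (example paths only) of how the two helpers above
# interact. With b'.hg/patches.1' and b'.hg/patches.2' on disk:
#
#   lastsavename(b'.hg/patches')  ->  (b'.hg/patches.2', 2)
#   savename(b'.hg/patches')      ->  b'.hg/patches.3'
#
# With no numbered copies present, lastsavename returns (None, None) and
# savename falls back to index 0, yielding b'.hg/patches.1'.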
3455 3453
3456 3454 @command(
3457 3455 b"qpush",
3458 3456 [
3459 3457 (
3460 3458 b'',
3461 3459 b'keep-changes',
3462 3460 None,
3463 3461 _(b'tolerate non-conflicting local changes'),
3464 3462 ),
3465 3463 (b'f', b'force', None, _(b'apply on top of local changes')),
3466 3464 (
3467 3465 b'e',
3468 3466 b'exact',
3469 3467 None,
3470 3468 _(b'apply the target patch to its recorded parent'),
3471 3469 ),
3472 3470 (b'l', b'list', None, _(b'list patch name in commit text')),
3473 3471 (b'a', b'all', None, _(b'apply all patches')),
3474 3472 (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
3475 3473 (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
3476 3474 (
3477 3475 b'',
3478 3476 b'move',
3479 3477 None,
3480 3478 _(b'reorder patch series and apply only the patch'),
3481 3479 ),
3482 3480 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3483 3481 ],
3484 3482 _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
3485 3483 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3486 3484 helpbasic=True,
3487 3485 )
3488 3486 def push(ui, repo, patch=None, **opts):
3489 3487 """push the next patch onto the stack
3490 3488
3491 3489 By default, abort if the working directory contains uncommitted
3492 3490 changes. With --keep-changes, abort only if the uncommitted files
3493 3491 overlap with patched files. With -f/--force, backup and patch over
3494 3492 uncommitted changes.
3495 3493
3496 3494 Return 0 on success.
3497 3495 """
3498 3496 q = repo.mq
3499 3497 mergeq = None
3500 3498
3501 3499 opts = pycompat.byteskwargs(opts)
3502 3500 opts = fixkeepchangesopts(ui, opts)
3503 3501 if opts.get(b'merge'):
3504 3502 if opts.get(b'name'):
3505 3503 newpath = repo.vfs.join(opts.get(b'name'))
3506 3504 else:
3507 3505 newpath, i = lastsavename(q.path)
3508 3506 if not newpath:
3509 3507 ui.warn(_(b"no saved queues found, please use -n\n"))
3510 3508 return 1
3511 3509 mergeq = queue(ui, repo.baseui, repo.path, newpath)
3512 3510 ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
3513 3511 ret = q.push(
3514 3512 repo,
3515 3513 patch,
3516 3514 force=opts.get(b'force'),
3517 3515 list=opts.get(b'list'),
3518 3516 mergeq=mergeq,
3519 3517 all=opts.get(b'all'),
3520 3518 move=opts.get(b'move'),
3521 3519 exact=opts.get(b'exact'),
3522 3520 nobackup=opts.get(b'no_backup'),
3523 3521 keepchanges=opts.get(b'keep_changes'),
3524 3522 )
3525 3523 return ret
3526 3524
3527 3525
3528 3526 @command(
3529 3527 b"qpop",
3530 3528 [
3531 3529 (b'a', b'all', None, _(b'pop all patches')),
3532 3530 (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
3533 3531 (
3534 3532 b'',
3535 3533 b'keep-changes',
3536 3534 None,
3537 3535 _(b'tolerate non-conflicting local changes'),
3538 3536 ),
3539 3537 (b'f', b'force', None, _(b'forget any local changes to patched files')),
3540 3538 (b'', b'no-backup', None, _(b'do not save backup copies of files')),
3541 3539 ],
3542 3540 _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
3543 3541 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3544 3542 helpbasic=True,
3545 3543 )
3546 3544 def pop(ui, repo, patch=None, **opts):
3547 3545 """pop the current patch off the stack
3548 3546
3549 3547 Without argument, pops off the top of the patch stack. If given a
3550 3548 patch name, keeps popping off patches until the named patch is at
3551 3549 the top of the stack.
3552 3550
3553 3551 By default, abort if the working directory contains uncommitted
3554 3552 changes. With --keep-changes, abort only if the uncommitted files
3555 3553 overlap with patched files. With -f/--force, backup and discard
3556 3554 changes made to such files.
3557 3555
3558 3556 Return 0 on success.
3559 3557 """
3560 3558 opts = pycompat.byteskwargs(opts)
3561 3559 opts = fixkeepchangesopts(ui, opts)
3562 3560 localupdate = True
3563 3561 if opts.get(b'name'):
3564 3562 q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
3565 3563 ui.warn(_(b'using patch queue: %s\n') % q.path)
3566 3564 localupdate = False
3567 3565 else:
3568 3566 q = repo.mq
3569 3567 ret = q.pop(
3570 3568 repo,
3571 3569 patch,
3572 3570 force=opts.get(b'force'),
3573 3571 update=localupdate,
3574 3572 all=opts.get(b'all'),
3575 3573 nobackup=opts.get(b'no_backup'),
3576 3574 keepchanges=opts.get(b'keep_changes'),
3577 3575 )
3578 3576 q.savedirty()
3579 3577 return ret
3580 3578
3581 3579
3582 3580 @command(
3583 3581 b"qrename|qmv",
3584 3582 [],
3585 3583 _(b'hg qrename PATCH1 [PATCH2]'),
3586 3584 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3587 3585 )
3588 3586 def rename(ui, repo, patch, name=None, **opts):
3589 3587 """rename a patch
3590 3588
3591 3589 With one argument, renames the current patch to PATCH1.
3592 3590 With two arguments, renames PATCH1 to PATCH2.
3593 3591
3594 3592 Returns 0 on success."""
3595 3593 q = repo.mq
3596 3594 if not name:
3597 3595 name = patch
3598 3596 patch = None
3599 3597
3600 3598 if patch:
3601 3599 patch = q.lookup(patch)
3602 3600 else:
3603 3601 if not q.applied:
3604 3602 ui.write(_(b'no patches applied\n'))
3605 3603 return
3606 3604 patch = q.lookup(b'qtip')
3607 3605 absdest = q.join(name)
3608 3606 if os.path.isdir(absdest):
3609 3607 name = normname(os.path.join(name, os.path.basename(patch)))
3610 3608 absdest = q.join(name)
3611 3609 q.checkpatchname(name)
3612 3610
3613 3611 ui.note(_(b'renaming %s to %s\n') % (patch, name))
3614 3612 i = q.findseries(patch)
3615 3613 guards = q.guard_re.findall(q.fullseries[i])
3616 3614 q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
3617 3615 q.parseseries()
3618 3616 q.seriesdirty = True
3619 3617
3620 3618 info = q.isapplied(patch)
3621 3619 if info:
3622 3620 q.applied[info[0]] = statusentry(info[1], name)
3623 3621 q.applieddirty = True
3624 3622
3625 3623 destdir = os.path.dirname(absdest)
3626 3624 if not os.path.isdir(destdir):
3627 3625 os.makedirs(destdir)
3628 3626 util.rename(q.join(patch), absdest)
3629 3627 r = q.qrepo()
3630 3628 if r and patch in r.dirstate:
3631 3629 with r.wlock(), r.dirstate.changing_files(r):
3632 3630 wctx = r[None]
3633 3631 if r.dirstate.get_entry(patch).added:
3634 3632 r.dirstate.set_untracked(patch)
3635 3633 r.dirstate.set_tracked(name)
3636 3634 else:
3637 3635 wctx.copy(patch, name)
3638 3636 wctx.forget([patch])
3639 3637
3640 3638 q.savedirty()
3641 3639
3642 3640
3643 3641 @command(
3644 3642 b"qrestore",
3645 3643 [
3646 3644 (b'd', b'delete', None, _(b'delete save entry')),
3647 3645 (b'u', b'update', None, _(b'update queue working directory')),
3648 3646 ],
3649 3647 _(b'hg qrestore [-d] [-u] REV'),
3650 3648 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3651 3649 )
3652 3650 def restore(ui, repo, rev, **opts):
3653 3651 """restore the queue state saved by a revision (DEPRECATED)
3654 3652
3655 3653 This command is deprecated, use :hg:`rebase` instead."""
3656 3654 rev = repo.lookup(rev)
3657 3655 q = repo.mq
3658 3656 q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
3659 3657 q.savedirty()
3660 3658 return 0
3661 3659
3662 3660
3663 3661 @command(
3664 3662 b"qsave",
3665 3663 [
3666 3664 (b'c', b'copy', None, _(b'copy patch directory')),
3667 3665 (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
3668 3666 (b'e', b'empty', None, _(b'clear queue status file')),
3669 3667 (b'f', b'force', None, _(b'force copy')),
3670 3668 ]
3671 3669 + cmdutil.commitopts,
3672 3670 _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
3673 3671 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3674 3672 )
3675 3673 def save(ui, repo, **opts):
3676 3674 """save current queue state (DEPRECATED)
3677 3675
3678 3676 This command is deprecated, use :hg:`rebase` instead."""
3679 3677 q = repo.mq
3680 3678 opts = pycompat.byteskwargs(opts)
3681 3679 message = cmdutil.logmessage(ui, opts)
3682 3680 ret = q.save(repo, msg=message)
3683 3681 if ret:
3684 3682 return ret
3685 3683 q.savedirty() # save to .hg/patches before copying
3686 3684 if opts.get(b'copy'):
3687 3685 path = q.path
3688 3686 if opts.get(b'name'):
3689 3687 newpath = os.path.join(q.basepath, opts.get(b'name'))
3690 3688 if os.path.exists(newpath):
3691 3689 if not os.path.isdir(newpath):
3692 3690 raise error.Abort(
3693 3691 _(b'destination %s exists and is not a directory')
3694 3692 % newpath
3695 3693 )
3696 3694 if not opts.get(b'force'):
3697 3695 raise error.Abort(
3698 3696 _(b'destination %s exists, use -f to force') % newpath
3699 3697 )
3700 3698 else:
3701 3699 newpath = savename(path)
3702 3700 ui.warn(_(b"copy %s to %s\n") % (path, newpath))
3703 3701 util.copyfiles(path, newpath)
3704 3702 if opts.get(b'empty'):
3705 3703 del q.applied[:]
3706 3704 q.applieddirty = True
3707 3705 q.savedirty()
3708 3706 return 0
3709 3707
3710 3708
3711 3709 @command(
3712 3710 b"qselect",
3713 3711 [
3714 3712 (b'n', b'none', None, _(b'disable all guards')),
3715 3713 (b's', b'series', None, _(b'list all guards in series file')),
3716 3714 (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
3717 3715 (b'', b'reapply', None, _(b'pop, then reapply patches')),
3718 3716 ],
3719 3717 _(b'hg qselect [OPTION]... [GUARD]...'),
3720 3718 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3721 3719 )
3722 3720 def select(ui, repo, *args, **opts):
3723 3721 """set or print guarded patches to push
3724 3722
3725 3723 Use the :hg:`qguard` command to set or print guards on a patch, then use
3726 3724 qselect to tell mq which guards to use. A patch will be pushed if
3727 3725 it has no guards or any positive guards match the currently
3728 3726 selected guard, but will not be pushed if any negative guards
3729 3727 match the current guard. For example::
3730 3728
3731 3729 qguard foo.patch -- -stable (negative guard)
3732 3730 qguard bar.patch +stable (positive guard)
3733 3731 qselect stable
3734 3732
3735 3733 This activates the "stable" guard. mq will skip foo.patch (because
3736 3734 it has a negative match) but push bar.patch (because it has a
3737 3735 positive match).
3738 3736
3739 3737 With no arguments, prints the currently active guards.
3740 3738 With one argument, sets the active guard.
3741 3739
3742 3740 Use -n/--none to deactivate guards (no other arguments needed).
3743 3741 When no guards are active, patches with positive guards are
3744 3742 skipped and patches with negative guards are pushed.
3745 3743
3746 3744 qselect can change the guards on applied patches. It does not pop
3747 3745 guarded patches by default. Use --pop to pop back to the last
3748 3746 applied patch that is not guarded. Use --reapply (which implies
3749 3747 --pop) to push back to the current patch afterwards, but skip
3750 3748 guarded patches.
3751 3749
3752 3750 Use -s/--series to print a list of all guards in the series file
3753 3751 (no other arguments needed). Use -v for more information.
3754 3752
3755 3753 Returns 0 on success."""
3756 3754
3757 3755 q = repo.mq
3758 3756 opts = pycompat.byteskwargs(opts)
3759 3757 guards = q.active()
3760 3758 pushable = lambda i: q.pushable(q.applied[i].name)[0]
3761 3759 if args or opts.get(b'none'):
3762 3760 old_unapplied = q.unapplied(repo)
3763 3761 old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3764 3762 q.setactive(args)
3765 3763 q.savedirty()
3766 3764 if not args:
3767 3765 ui.status(_(b'guards deactivated\n'))
3768 3766 if not opts.get(b'pop') and not opts.get(b'reapply'):
3769 3767 unapplied = q.unapplied(repo)
3770 3768 guarded = [i for i in range(len(q.applied)) if not pushable(i)]
3771 3769 if len(unapplied) != len(old_unapplied):
3772 3770 ui.status(
3773 3771 _(
3774 3772 b'number of unguarded, unapplied patches has '
3775 3773 b'changed from %d to %d\n'
3776 3774 )
3777 3775 % (len(old_unapplied), len(unapplied))
3778 3776 )
3779 3777 if len(guarded) != len(old_guarded):
3780 3778 ui.status(
3781 3779 _(
3782 3780 b'number of guarded, applied patches has changed '
3783 3781 b'from %d to %d\n'
3784 3782 )
3785 3783 % (len(old_guarded), len(guarded))
3786 3784 )
3787 3785 elif opts.get(b'series'):
3788 3786 guards = {}
3789 3787 noguards = 0
3790 3788 for gs in q.seriesguards:
3791 3789 if not gs:
3792 3790 noguards += 1
3793 3791 for g in gs:
3794 3792 guards.setdefault(g, 0)
3795 3793 guards[g] += 1
3796 3794 if ui.verbose:
3797 3795 guards[b'NONE'] = noguards
3798 3796 guards = list(guards.items())
3799 3797 guards.sort(key=lambda x: x[0][1:])
3800 3798 if guards:
3801 3799 ui.note(_(b'guards in series file:\n'))
3802 3800 for guard, count in guards:
3803 3801 ui.note(b'%2d ' % count)
3804 3802 ui.write(guard, b'\n')
3805 3803 else:
3806 3804 ui.note(_(b'no guards in series file\n'))
3807 3805 else:
3808 3806 if guards:
3809 3807 ui.note(_(b'active guards:\n'))
3810 3808 for g in guards:
3811 3809 ui.write(g, b'\n')
3812 3810 else:
3813 3811 ui.write(_(b'no active guards\n'))
3814 3812 reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
3815 3813 popped = False
3816 3814 if opts.get(b'pop') or opts.get(b'reapply'):
3817 3815 for i in range(len(q.applied)):
3818 3816 if not pushable(i):
3819 3817 ui.status(_(b'popping guarded patches\n'))
3820 3818 popped = True
3821 3819 if i == 0:
3822 3820 q.pop(repo, all=True)
3823 3821 else:
3824 3822 q.pop(repo, q.applied[i - 1].name)
3825 3823 break
3826 3824 if popped:
3827 3825 try:
3828 3826 if reapply:
3829 3827 ui.status(_(b'reapplying unguarded patches\n'))
3830 3828 q.push(repo, reapply)
3831 3829 finally:
3832 3830 q.savedirty()
3833 3831
3834 3832
3835 3833 @command(
3836 3834 b"qfinish",
3837 3835 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3838 3836 _(b'hg qfinish [-a] [REV]...'),
3839 3837 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3840 3838 )
3841 3839 def finish(ui, repo, *revrange, **opts):
3842 3840 """move applied patches into repository history
3843 3841
3844 3842 Finishes the specified revisions (corresponding to applied
3845 3843 patches) by moving them out of mq control into regular repository
3846 3844 history.
3847 3845
3848 3846 Accepts a revision range or the -a/--applied option. If --applied
3849 3847 is specified, all applied mq revisions are removed from mq
3850 3848 control. Otherwise, the given revisions must be at the base of the
3851 3849 stack of applied patches.
3852 3850
3853 3851 This can be especially useful if your changes have been applied to
3854 3852 an upstream repository, or if you are about to push your changes
3855 3853 to upstream.
3856 3854
3857 3855 Returns 0 on success.
3858 3856 """
3859 3857 if not opts.get('applied') and not revrange:
3860 3858 raise error.Abort(_(b'no revisions specified'))
3861 3859 elif opts.get('applied'):
3862 3860 revrange = (b'qbase::qtip',) + revrange
3863 3861
3864 3862 q = repo.mq
3865 3863 if not q.applied:
3866 3864 ui.status(_(b'no patches applied\n'))
3867 3865 return 0
3868 3866
3869 3867 revs = logcmdutil.revrange(repo, revrange)
3870 3868 if repo[b'.'].rev() in revs and repo[None].files():
3871 3869 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3872 3870 # queue.finish may change phases but leaves the responsibility to lock the
3873 3871 # repo to the caller, to avoid a deadlock with wlock. This command code is
3874 3872 # responsible for this locking.
3875 3873 with repo.lock():
3876 3874 q.finish(repo, revs)
3877 3875 q.savedirty()
3878 3876 return 0
3879 3877
3880 3878
3881 3879 @command(
3882 3880 b"qqueue",
3883 3881 [
3884 3882 (b'l', b'list', False, _(b'list all available queues')),
3885 3883 (b'', b'active', False, _(b'print name of active queue')),
3886 3884 (b'c', b'create', False, _(b'create new queue')),
3887 3885 (b'', b'rename', False, _(b'rename active queue')),
3888 3886 (b'', b'delete', False, _(b'delete reference to queue')),
3889 3887 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3890 3888 ],
3891 3889 _(b'[OPTION] [QUEUE]'),
3892 3890 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3893 3891 )
3894 3892 def qqueue(ui, repo, name=None, **opts):
3895 3893 """manage multiple patch queues
3896 3894
3897 3895 Supports switching between different patch queues, as well as creating
3898 3896 new patch queues and deleting existing ones.
3899 3897
3900 3898 Omitting a queue name or specifying -l/--list will show you the registered
3901 3899 queues - by default the "normal" patches queue is registered. The currently
3902 3900 active queue will be marked with "(active)". Specifying --active will print
3903 3901 only the name of the active queue.
3904 3902
3905 3903 To create a new queue, use -c/--create. The queue is automatically made
3906 3904 active, except when there are applied patches from the currently active
3907 3905 queue in the repository; in that case the queue is only created, and
3908 3906 switching to it fails.
3909 3907
3910 3908 To delete an existing queue, use --delete. You cannot delete the currently
3911 3909 active queue.
3912 3910
3913 3911 Returns 0 on success.
3914 3912 """
3915 3913 q = repo.mq
3916 3914 _defaultqueue = b'patches'
3917 3915 _allqueues = b'patches.queues'
3918 3916 _activequeue = b'patches.queue'
3919 3917
3920 3918 def _getcurrent():
3921 3919 cur = os.path.basename(q.path)
3922 3920 if cur.startswith(b'patches-'):
3923 3921 cur = cur[8:]
3924 3922 return cur
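# Illustrative only: queue directories are named b'patches-<name>', so
# _getcurrent() maps b'patches-stable' to b'stable'; the default directory
# b'patches' is returned unchanged.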
3925 3923
3926 3924 def _noqueues():
3927 3925 try:
3928 3926 fh = repo.vfs(_allqueues, b'r')
3929 3927 fh.close()
3930 3928 except IOError:
3931 3929 return True
3932 3930
3933 3931 return False
3934 3932
3935 3933 def _getqueues():
3936 3934 current = _getcurrent()
3937 3935
3938 3936 try:
3939 3937 fh = repo.vfs(_allqueues, b'r')
3940 3938 queues = [queue.strip() for queue in fh if queue.strip()]
3941 3939 fh.close()
3942 3940 if current not in queues:
3943 3941 queues.append(current)
3944 3942 except IOError:
3945 3943 queues = [_defaultqueue]
3946 3944
3947 3945 return sorted(queues)
3948 3946
3949 3947 def _setactive(name):
3950 3948 if q.applied:
3951 3949 raise error.Abort(
3952 3950 _(
3953 3951 b'new queue created, but cannot make active '
3954 3952 b'as patches are applied'
3955 3953 )
3956 3954 )
3957 3955 _setactivenocheck(name)
3958 3956
3959 3957 def _setactivenocheck(name):
3960 3958 fh = repo.vfs(_activequeue, b'w')
3961 3959 if name != b'patches':
3962 3960 fh.write(name)
3963 3961 fh.close()
3964 3962
3965 3963 def _addqueue(name):
3966 3964 fh = repo.vfs(_allqueues, b'a')
3967 3965 fh.write(b'%s\n' % (name,))
3968 3966 fh.close()
3969 3967
3970 3968 def _queuedir(name):
3971 3969 if name == b'patches':
3972 3970 return repo.vfs.join(b'patches')
3973 3971 else:
3974 3972 return repo.vfs.join(b'patches-' + name)
3975 3973
3976 3974 def _validname(name):
3977 3975 for n in name:
3978 3976 if n in b':\\/.':
3979 3977 return False
3980 3978 return True
3981 3979
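# Illustrative only: _validname rejects b':', b'\\', b'/' and b'.', so
# _validname(b'stable-fixes') is True while _validname(b'a/b') and
# _validname(b'v1.0') are False.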
3982 3980 def _delete(name):
3983 3981 if name not in existing:
3984 3982 raise error.Abort(_(b'cannot delete queue that does not exist'))
3985 3983
3986 3984 current = _getcurrent()
3987 3985
3988 3986 if name == current:
3989 3987 raise error.Abort(_(b'cannot delete currently active queue'))
3990 3988
3991 3989 fh = repo.vfs(b'patches.queues.new', b'w')
3992 3990 for queue in existing:
3993 3991 if queue == name:
3994 3992 continue
3995 3993 fh.write(b'%s\n' % (queue,))
3996 3994 fh.close()
3997 3995 repo.vfs.rename(b'patches.queues.new', _allqueues)
3998 3996
3999 3997 opts = pycompat.byteskwargs(opts)
4000 3998 if not name or opts.get(b'list') or opts.get(b'active'):
4001 3999 current = _getcurrent()
4002 4000 if opts.get(b'active'):
4003 4001 ui.write(b'%s\n' % (current,))
4004 4002 return
4005 4003 for queue in _getqueues():
4006 4004 ui.write(b'%s' % (queue,))
4007 4005 if queue == current and not ui.quiet:
4008 4006 ui.write(_(b' (active)\n'))
4009 4007 else:
4010 4008 ui.write(b'\n')
4011 4009 return
4012 4010
4013 4011 if not _validname(name):
4014 4012 raise error.Abort(
4015 4013 _(b'invalid queue name, may not contain the characters ":\\/."')
4016 4014 )
4017 4015
4018 4016 with repo.wlock():
4019 4017 existing = _getqueues()
4020 4018
4021 4019 if opts.get(b'create'):
4022 4020 if name in existing:
4023 4021 raise error.Abort(_(b'queue "%s" already exists') % name)
4024 4022 if _noqueues():
4025 4023 _addqueue(_defaultqueue)
4026 4024 _addqueue(name)
4027 4025 _setactive(name)
4028 4026 elif opts.get(b'rename'):
4029 4027 current = _getcurrent()
4030 4028 if name == current:
4031 4029 raise error.Abort(
4032 4030 _(b'can\'t rename "%s" to its current name') % name
4033 4031 )
4034 4032 if name in existing:
4035 4033 raise error.Abort(_(b'queue "%s" already exists') % name)
4036 4034
4037 4035 olddir = _queuedir(current)
4038 4036 newdir = _queuedir(name)
4039 4037
4040 4038 if os.path.exists(newdir):
4041 4039 raise error.Abort(
4042 4040 _(b'non-queue directory "%s" already exists') % newdir
4043 4041 )
4044 4042
4045 4043 fh = repo.vfs(b'patches.queues.new', b'w')
4046 4044 for queue in existing:
4047 4045 if queue == current:
4048 4046 fh.write(b'%s\n' % (name,))
4049 4047 if os.path.exists(olddir):
4050 4048 util.rename(olddir, newdir)
4051 4049 else:
4052 4050 fh.write(b'%s\n' % (queue,))
4053 4051 fh.close()
4054 4052 repo.vfs.rename(b'patches.queues.new', _allqueues)
4055 4053 _setactivenocheck(name)
4056 4054 elif opts.get(b'delete'):
4057 4055 _delete(name)
4058 4056 elif opts.get(b'purge'):
4059 4057 if name in existing:
4060 4058 _delete(name)
4061 4059 qdir = _queuedir(name)
4062 4060 if os.path.exists(qdir):
4063 4061 shutil.rmtree(qdir)
4064 4062 else:
4065 4063 if name not in existing:
4066 4064 raise error.Abort(_(b'use --create to create a new queue'))
4067 4065 _setactive(name)
4068 4066
4069 4067
4070 4068 def mqphasedefaults(repo, roots):
4071 4069 """callback used to set mq changeset as secret when no phase data exists"""
4072 4070 if repo.mq.applied:
4073 4071 if repo.ui.configbool(b'mq', b'secret'):
4074 4072 mqphase = phases.secret
4075 4073 else:
4076 4074 mqphase = phases.draft
4077 4075 qbase = repo[repo.mq.applied[0].node]
4078 4076 roots[mqphase].add(qbase.node())
4079 4077 return roots
4080 4078
4081 4079
4082 4080 def reposetup(ui, repo):
4083 4081 class mqrepo(repo.__class__):
4084 4082 @localrepo.unfilteredpropertycache
4085 4083 def mq(self):
4086 4084 return queue(self.ui, self.baseui, self.path)
4087 4085
4088 4086 def invalidateall(self):
4089 4087 super(mqrepo, self).invalidateall()
4090 4088 if localrepo.hasunfilteredcache(self, 'mq'):
4091 4089 # recreate mq in case queue path was changed
4092 4090 delattr(self.unfiltered(), 'mq')
4093 4091
4094 4092 def abortifwdirpatched(self, errmsg, force=False):
4095 4093 if self.mq.applied and self.mq.checkapplied and not force:
4096 4094 parents = self.dirstate.parents()
4097 4095 patches = [s.node for s in self.mq.applied]
4098 4096 if any(p in patches for p in parents):
4099 4097 raise error.Abort(errmsg)
4100 4098
4101 4099 def commit(
4102 4100 self,
4103 4101 text=b"",
4104 4102 user=None,
4105 4103 date=None,
4106 4104 match=None,
4107 4105 force=False,
4108 4106 editor=False,
4109 4107 extra=None,
4110 4108 ):
4111 4109 if extra is None:
4112 4110 extra = {}
4113 4111 self.abortifwdirpatched(
4114 4112 _(b'cannot commit over an applied mq patch'), force
4115 4113 )
4116 4114
4117 4115 return super(mqrepo, self).commit(
4118 4116 text, user, date, match, force, editor, extra
4119 4117 )
4120 4118
4121 4119 def checkpush(self, pushop):
4122 4120 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4123 4121 outapplied = [e.node for e in self.mq.applied]
4124 4122 if pushop.revs:
4125 4123 # Assume applied patches have no non-patch descendants and
4126 4124 # are not on the remote already. Filter out any changeset
4127 4125 # not pushed.
4128 4126 heads = set(pushop.revs)
4129 4127 for node in reversed(outapplied):
4130 4128 if node in heads:
4131 4129 break
4132 4130 else:
4133 4131 outapplied.pop()
4134 4132 # looking for pushed and shared changeset
4135 4133 for node in outapplied:
4136 4134 if self[node].phase() < phases.secret:
4137 4135 raise error.Abort(_(b'source has mq patches applied'))
4138 4136 # no non-secret patches pushed
4139 4137 super(mqrepo, self).checkpush(pushop)
4140 4138
4141 4139 def _findtags(self):
4142 4140 '''augment tags from base class with patch tags'''
4143 4141 result = super(mqrepo, self)._findtags()
4144 4142
4145 4143 q = self.mq
4146 4144 if not q.applied:
4147 4145 return result
4148 4146
4149 4147 mqtags = [(patch.node, patch.name) for patch in q.applied]
4150 4148
4151 4149 try:
4152 4150 # for now ignore filtering business
4153 4151 self.unfiltered().changelog.rev(mqtags[-1][0])
4154 4152 except error.LookupError:
4155 4153 self.ui.warn(
4156 4154 _(b'mq status file refers to unknown node %s\n')
4157 4155 % short(mqtags[-1][0])
4158 4156 )
4159 4157 return result
4160 4158
4161 4159 # do not add fake tags for filtered revisions
4162 4160 included = self.changelog.hasnode
4163 4161 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4164 4162 if not mqtags:
4165 4163 return result
4166 4164
4167 4165 mqtags.append((mqtags[-1][0], b'qtip'))
4168 4166 mqtags.append((mqtags[0][0], b'qbase'))
4169 4167 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4170 4168 tags = result[0]
4171 4169 for patch in mqtags:
4172 4170 if patch[1] in tags:
4173 4171 self.ui.warn(
4174 4172 _(b'tag %s overrides mq patch of the same name\n')
4175 4173 % patch[1]
4176 4174 )
4177 4175 else:
4178 4176 tags[patch[1]] = patch[0]
4179 4177
4180 4178 return result
4181 4179
4182 4180 if repo.local():
4183 4181 repo.__class__ = mqrepo
4184 4182
4185 4183 repo._phasedefaults.append(mqphasedefaults)
4186 4184
4187 4185
4188 4186 def mqimport(orig, ui, repo, *args, **kwargs):
4189 4187 if hasattr(repo, 'abortifwdirpatched') and not kwargs.get(
4190 4188 'no_commit', False
4191 4189 ):
4192 4190 repo.abortifwdirpatched(
4193 4191 _(b'cannot import over an applied patch'), kwargs.get('force')
4194 4192 )
4195 4193 return orig(ui, repo, *args, **kwargs)
4196 4194
4197 4195
4198 4196 def mqinit(orig, ui, *args, **kwargs):
4199 4197 mq = kwargs.pop('mq', None)
4200 4198
4201 4199 if not mq:
4202 4200 return orig(ui, *args, **kwargs)
4203 4201
4204 4202 if args:
4205 4203 repopath = args[0]
4206 4204 if not hg.islocal(repopath):
4207 4205 raise error.Abort(
4208 4206 _(b'only a local queue repository may be initialized')
4209 4207 )
4210 4208 else:
4211 4209 repopath = cmdutil.findrepo(encoding.getcwd())
4212 4210 if not repopath:
4213 4211 raise error.Abort(
4214 4212 _(b'there is no Mercurial repository here (.hg not found)')
4215 4213 )
4216 4214 repo = hg.repository(ui, repopath)
4217 4215 return qinit(ui, repo, True)
4218 4216
4219 4217
4220 4218 def mqcommand(orig, ui, repo, *args, **kwargs):
4221 4219 """Add --mq option to operate on patch repository instead of main"""
4222 4220
4223 4221 # some commands do not like getting unknown options
4224 4222 mq = kwargs.pop('mq', None)
4225 4223
4226 4224 if not mq:
4227 4225 return orig(ui, repo, *args, **kwargs)
4228 4226
4229 4227 q = repo.mq
4230 4228 r = q.qrepo()
4231 4229 if not r:
4232 4230 raise error.Abort(_(b'no queue repository'))
4233 4231 return orig(r.ui, r, *args, **kwargs)
4234 4232
4235 4233
4236 4234 def summaryhook(ui, repo):
4237 4235 q = repo.mq
4238 4236 m = []
4239 4237 a, u = len(q.applied), len(q.unapplied(repo))
4240 4238 if a:
4241 4239 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4242 4240 if u:
4243 4241 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4244 4242 if m:
4245 4243 # i18n: column positioning for "hg summary"
4246 4244 ui.write(_(b"mq: %s\n") % b', '.join(m))
4247 4245 else:
4248 4246 # i18n: column positioning for "hg summary"
4249 4247 ui.note(_(b"mq: (empty queue)\n"))
4250 4248
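# Illustrative only: with three applied and two unapplied patches, the hook
# above contributes a line like
#
#   mq: 3 applied, 2 unapplied
#
# to `hg summary`; with an empty queue, "mq: (empty queue)" is emitted only
# in verbose mode (ui.note).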
4251 4249
4252 4250 revsetpredicate = registrar.revsetpredicate()
4253 4251
4254 4252
4255 4253 @revsetpredicate(b'mq()')
4256 4254 def revsetmq(repo, subset, x):
4257 4255 """Changesets managed by MQ."""
4258 4256 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4259 4257 applied = {repo[r.node].rev() for r in repo.mq.applied}
4260 4258 return smartset.baseset([r for r in subset if r in applied])
4261 4259
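# Usage sketch (illustrative invocations, not from this changeset): the
# predicate above makes applied mq patches addressable from any revset, e.g.
#
#   hg log -r 'mq()'
#   hg log -r 'draft() and not mq()'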
4262 4260
4263 4261 # tell hggettext to extract docstrings from these functions:
4264 4262 i18nfunctions = [revsetmq]
4265 4263
4266 4264
4267 4265 def extsetup(ui):
4268 4266 # Ensure mq wrappers are called first, regardless of extension load order,
4269 4267 # by NOT wrapping in uisetup() and instead deferring to init stage two here.
4270 4268 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4271 4269
4272 4270 extensions.wrapcommand(commands.table, b'import', mqimport)
4273 4271 cmdutil.summaryhooks.add(b'mq', summaryhook)
4274 4272
4275 4273 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4276 4274 entry[1].extend(mqopt)
4277 4275
4278 4276 def dotable(cmdtable):
4279 4277 for cmd, entry in cmdtable.items():
4280 4278 cmd = cmdutil.parsealiases(cmd)[0]
4281 4279 func = entry[0]
4282 4280 if func.norepo:
4283 4281 continue
4284 4282 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4285 4283 entry[1].extend(mqopt)
4286 4284
4287 4285 dotable(commands.table)
4288 4286
4289 4287 thismodule = sys.modules["hgext.mq"]
4290 4288 for extname, extmodule in extensions.extensions():
4291 4289 if extmodule != thismodule:
4292 4290 dotable(getattr(extmodule, 'cmdtable', {}))
4293 4291
4294 4292
4295 4293 colortable = {
4296 4294 b'qguard.negative': b'red',
4297 4295 b'qguard.positive': b'yellow',
4298 4296 b'qguard.unguarded': b'green',
4299 4297 b'qseries.applied': b'blue bold underline',
4300 4298 b'qseries.guarded': b'black bold',
4301 4299 b'qseries.missing': b'red bold',
4302 4300 b'qseries.unapplied': b'black bold',
4303 4301 }
@@ -1,2401 +1,2400
1 1 # phabricator.py - simple Phabricator integration
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """simple Phabricator integration (EXPERIMENTAL)
8 8
9 9 This extension provides a ``phabsend`` command which sends a stack of
10 10 changesets to Phabricator, a ``phabread`` command which prints a stack of
11 11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
12 12 to update statuses in batch.
13 13
14 14 A "phabstatus" view for :hg:`show` is also provided; it displays status
15 15 information of Phabricator differentials associated with unfinished
16 16 changesets.
17 17
18 18 By default, Phabricator requires a ``Test Plan``, which might prevent some
19 19 changesets from being sent. The requirement can be disabled by changing the
20 20 ``differential.require-test-plan-field`` config server-side.
21 21
22 22 Config::
23 23
24 24 [phabricator]
25 25 # Phabricator URL
26 26 url = https://phab.example.com/
27 27
28 28 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
29 29 # callsign is "FOO".
30 30 callsign = FOO
31 31
32 32 # curl command to use. If not set (default), use builtin HTTP library to
33 33 # communicate. If set, use the specified curl command. This could be useful
34 34 # if you need to specify advanced options that are not easily supported by
35 35 # the internal library.
36 36 curlcmd = curl --connect-timeout 2 --retry 3 --silent
37 37
38 38 # retry a failed command N times (default 0). Useful when using the extension
39 39 # over a flaky connection.
40 40 #
41 41 # We wait `retry.interval` between each retry, in seconds.
42 42 # (default 1 second).
43 43 retry = 3
44 44 retry.interval = 10
45 45
46 46 # the retry option combines well with the http.timeout one.
47 47 #
48 48 # For example to give up on http request after 20 seconds:
49 49 [http]
50 50 timeout=20
51 51
52 52 [auth]
53 53 example.schemes = https
54 54 example.prefix = phab.example.com
55 55
56 56 # API token. Get it from https://$HOST/conduit/login/
57 57 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
58 58 """
59 59
60 60
61 61 import base64
62 62 import contextlib
63 63 import hashlib
64 64 import io
65 65 import itertools
66 66 import json
67 67 import mimetypes
68 68 import operator
69 69 import re
70 70 import time
71 71
72 72 from mercurial.node import bin, short
73 73 from mercurial.i18n import _
74 from mercurial.pycompat import getattr
75 74 from mercurial.thirdparty import attr
76 75 from mercurial import (
77 76 cmdutil,
78 77 context,
79 78 copies,
80 79 encoding,
81 80 error,
82 81 exthelper,
83 82 graphmod,
84 83 httpconnection as httpconnectionmod,
85 84 localrepo,
86 85 logcmdutil,
87 86 match,
88 87 mdiff,
89 88 obsutil,
90 89 parser,
91 90 patch,
92 91 phases,
93 92 pycompat,
94 93 rewriteutil,
95 94 scmutil,
96 95 smartset,
97 96 tags,
98 97 templatefilters,
99 98 templateutil,
100 99 url as urlmod,
101 100 util,
102 101 )
103 102 from mercurial.utils import (
104 103 procutil,
105 104 stringutil,
106 105 urlutil,
107 106 )
108 107 from . import show
109 108
110 109
111 110 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 111 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 112 # be specifying the version(s) of Mercurial they are tested with, or
114 113 # leave the attribute unspecified.
115 114 testedwith = b'ships-with-hg-core'
116 115
117 116 eh = exthelper.exthelper()
118 117
119 118 cmdtable = eh.cmdtable
120 119 command = eh.command
121 120 configtable = eh.configtable
122 121 templatekeyword = eh.templatekeyword
123 122 uisetup = eh.finaluisetup
124 123
125 124 # developer config: phabricator.batchsize
126 125 eh.configitem(
127 126 b'phabricator',
128 127 b'batchsize',
129 128 default=12,
130 129 )
131 130 eh.configitem(
132 131 b'phabricator',
133 132 b'callsign',
134 133 default=None,
135 134 )
136 135 eh.configitem(
137 136 b'phabricator',
138 137 b'curlcmd',
139 138 default=None,
140 139 )
141 140 # developer config: phabricator.debug
142 141 eh.configitem(
143 142 b'phabricator',
144 143 b'debug',
145 144 default=False,
146 145 )
147 146 # developer config: phabricator.repophid
148 147 eh.configitem(
149 148 b'phabricator',
150 149 b'repophid',
151 150 default=None,
152 151 )
153 152 eh.configitem(
154 153 b'phabricator',
155 154 b'retry',
156 155 default=0,
157 156 )
158 157 eh.configitem(
159 158 b'phabricator',
160 159 b'retry.interval',
161 160 default=1,
162 161 )
163 162 eh.configitem(
164 163 b'phabricator',
165 164 b'url',
166 165 default=None,
167 166 )
168 167 eh.configitem(
169 168 b'phabsend',
170 169 b'confirm',
171 170 default=False,
172 171 )
173 172 eh.configitem(
174 173 b'phabimport',
175 174 b'secret',
176 175 default=False,
177 176 )
178 177 eh.configitem(
179 178 b'phabimport',
180 179 b'obsolete',
181 180 default=False,
182 181 )
183 182
184 183 colortable = {
185 184 b'phabricator.action.created': b'green',
186 185 b'phabricator.action.skipped': b'magenta',
187 186 b'phabricator.action.updated': b'magenta',
188 187 b'phabricator.drev': b'bold',
189 188 b'phabricator.status.abandoned': b'magenta dim',
190 189 b'phabricator.status.accepted': b'green bold',
191 190 b'phabricator.status.closed': b'green',
192 191 b'phabricator.status.needsreview': b'yellow',
193 192 b'phabricator.status.needsrevision': b'red',
194 193 b'phabricator.status.changesplanned': b'red',
195 194 }
196 195
197 196 _VCR_FLAGS = [
198 197 (
199 198 b'',
200 199 b'test-vcr',
201 200 b'',
202 201 _(
203 202 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
204 203 b', otherwise will mock all http requests using the specified vcr file.'
205 204 b' (ADVANCED)'
206 205 ),
207 206 ),
208 207 ]
209 208
210 209
211 210 @eh.wrapfunction(localrepo, "loadhgrc")
212 211 def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
213 212 """Load ``.arcconfig`` content into a ui instance on repository open."""
214 213 result = False
215 214 arcconfig = {}
216 215
217 216 try:
218 217 # json.loads only accepts bytes from 3.6+
219 218 rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
220 219 # json.loads only returns unicode strings
221 220 arcconfig = pycompat.rapply(
222 221 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
223 222 pycompat.json_loads(rawparams),
224 223 )
225 224
226 225 result = True
227 226 except ValueError:
228 227 ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
229 228 except IOError:
230 229 pass
231 230
232 231 cfg = util.sortdict()
233 232
234 233 if b"repository.callsign" in arcconfig:
235 234 cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
236 235
237 236 if b"phabricator.uri" in arcconfig:
238 237 cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
239 238
240 239 if cfg:
241 240 ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
242 241
243 242 return (
244 243 orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
245 244 ) # Load .hg/hgrc
246 245
247 246
248 247 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
249 248 fullflags = flags + _VCR_FLAGS
250 249
251 250 def hgmatcher(r1, r2):
252 251 if r1.uri != r2.uri or r1.method != r2.method:
253 252 return False
254 253 r1params = util.urlreq.parseqs(r1.body)
255 254 r2params = util.urlreq.parseqs(r2.body)
256 255 for key in r1params:
257 256 if key not in r2params:
258 257 return False
259 258 value = r1params[key][0]
260 259 # we want to compare json payloads without worrying about ordering
261 260 if value.startswith(b'{') and value.endswith(b'}'):
262 261 r1json = pycompat.json_loads(value)
263 262 r2json = pycompat.json_loads(r2params[key][0])
264 263 if r1json != r2json:
265 264 return False
266 265 elif r2params[key][0] != value:
267 266 return False
268 267 return True
269 268
270 269 def sanitiserequest(request):
271 270 request.body = re.sub(
272 271 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
273 272 )
274 273 return request
275 274
276 275 def sanitiseresponse(response):
277 276 if 'set-cookie' in response['headers']:
278 277 del response['headers']['set-cookie']
279 278 return response
280 279
281 280 def decorate(fn):
282 281 def inner(*args, **kwargs):
283 282 vcr = kwargs.pop('test_vcr')
284 283 if vcr:
285 284 cassette = pycompat.fsdecode(vcr)
286 285 import hgdemandimport
287 286
288 287 with hgdemandimport.deactivated():
289 288 # pytype: disable=import-error
290 289 import vcr as vcrmod
291 290 import vcr.stubs as stubs
292 291
293 292 # pytype: enable=import-error
294 293
295 294 vcr = vcrmod.VCR(
296 295 serializer='json',
297 296 before_record_request=sanitiserequest,
298 297 before_record_response=sanitiseresponse,
299 298 custom_patches=[
300 299 (
301 300 urlmod,
302 301 'httpconnection',
303 302 stubs.VCRHTTPConnection,
304 303 ),
305 304 (
306 305 urlmod,
307 306 'httpsconnection',
308 307 stubs.VCRHTTPSConnection,
309 308 ),
310 309 ],
311 310 )
312 311 vcr.register_matcher('hgmatcher', hgmatcher)
313 312 with vcr.use_cassette(cassette, match_on=['hgmatcher']):
314 313 return fn(*args, **kwargs)
315 314 return fn(*args, **kwargs)
316 315
317 316 cmd = util.checksignature(inner, depth=2)
318 317 cmd.__name__ = fn.__name__
319 318 cmd.__doc__ = fn.__doc__
320 319
321 320 return command(
322 321 name,
323 322 fullflags,
324 323 spec,
325 324 helpcategory=helpcategory,
326 325 optionalrepo=optionalrepo,
327 326 )(cmd)
328 327
329 328 return decorate
330 329
331 330
332 331 def _debug(ui, *msg, **opts):
333 332 """write debug output for Phabricator if ``phabricator.debug`` is set
334 333
335 334 Specifically, this avoids dumping Conduit and HTTP auth chatter that is
336 335 printed with the --debug argument.
337 336 """
338 337 if ui.configbool(b"phabricator", b"debug"):
339 338 flag = ui.debugflag
340 339 try:
341 340 ui.debugflag = True
342 341 ui.write(*msg, **opts)
343 342 finally:
344 343 ui.debugflag = flag
345 344
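# Usage sketch (assumed call site): unlike plain ui.debug(), the helper above
# stays silent unless the phabricator.debug knob is set, e.g.
#
#   _debug(ui, b"conduit response received\n")            # no-op by default
#   hg phabsend --config phabricator.debug=true ...       # enables output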
346 345
347 346 def urlencodenested(params):
348 347 """like urlencode, but works with nested parameters.
349 348
350 349 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
351 350 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
352 351 urlencode. Note: the encoding is consistent with PHP's http_build_query.
353 352 """
354 353 flatparams = util.sortdict()
355 354
356 355 def process(prefix: bytes, obj):
357 356 if isinstance(obj, bool):
358 357 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
359 358 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
360 359 # .items() will only be called for a dict type
361 360 # pytype: disable=attribute-error
362 361 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
363 362 # pytype: enable=attribute-error
364 363 if items is None:
365 364 flatparams[prefix] = obj
366 365 else:
367 366 for k, v in items(obj):
368 367 if prefix:
369 368 process(b'%s[%s]' % (prefix, k), v)
370 369 else:
371 370 process(k, v)
372 371
373 372 process(b'', params)
374 373 return urlutil.urlreq.urlencode(flatparams)
375 374
376 375
377 376 def readurltoken(ui):
378 377 """return conduit url, token and make sure they exist
379 378
380 379 Currently read from [auth] config section. In the future, it might
381 380 make sense to read from .arcconfig and .arcrc as well.
382 381 """
383 382 url = ui.config(b'phabricator', b'url')
384 383 if not url:
385 384 raise error.Abort(
386 385 _(b'config %s.%s is required') % (b'phabricator', b'url')
387 386 )
388 387
389 388 res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
390 389 token = None
391 390
392 391 if res:
393 392 group, auth = res
394 393
395 394 ui.debug(b"using auth.%s.* for authentication\n" % group)
396 395
397 396 token = auth.get(b'phabtoken')
398 397
399 398 if not token:
400 399 raise error.Abort(
401 400 _(b'Can\'t find conduit token associated with %s') % (url,)
402 401 )
403 402
404 403 return url, token
405 404
406 405
407 406 def callconduit(ui, name, params):
408 407 """call Conduit API, params is a dict. return json.loads result, or None"""
409 408 host, token = readurltoken(ui)
410 409 url, authinfo = urlutil.url(b'/'.join([host, b'api', name])).authinfo()
411 410 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
412 411 params = params.copy()
413 412 params[b'__conduit__'] = {
414 413 b'token': token,
415 414 }
416 415 rawdata = {
417 416 b'params': templatefilters.json(params),
418 417 b'output': b'json',
419 418 b'__conduit__': 1,
420 419 }
421 420 data = urlencodenested(rawdata)
422 421 curlcmd = ui.config(b'phabricator', b'curlcmd')
423 422 if curlcmd:
424 423 sin, sout = procutil.popen2(
425 424 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
426 425 )
427 426 sin.write(data)
428 427 sin.close()
429 428 body = sout.read()
430 429 else:
431 430 urlopener = urlmod.opener(ui, authinfo)
432 431 request = util.urlreq.request(pycompat.strurl(url), data=data)
433 432 max_try = ui.configint(b'phabricator', b'retry') + 1
434 433 timeout = ui.configwith(float, b'http', b'timeout')
435 434 for try_count in range(max_try):
436 435 try:
437 436 with contextlib.closing(
438 437 urlopener.open(request, timeout=timeout)
439 438 ) as rsp:
440 439 body = rsp.read()
441 440 break
442 441 except util.urlerr.urlerror as err:
443 442 if try_count == max_try - 1:
444 443 raise
445 444 ui.debug(
446 445 b'Conduit Request failed (try %d/%d): %r\n'
447 446 % (try_count + 1, max_try, err)
448 447 )
450 449 # a failing request might come from an overloaded server
450 449 retry_interval = ui.configint(b'phabricator', b'retry.interval')
451 450 time.sleep(retry_interval)
452 451 ui.debug(b'Conduit Response: %s\n' % body)
453 452 parsed = pycompat.rapply(
454 453 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
455 454 # json.loads only accepts bytes from py3.6+
456 455 pycompat.json_loads(encoding.unifromlocal(body)),
457 456 )
458 457 if parsed.get(b'error_code'):
459 458 msg = _(b'Conduit Error (%s): %s') % (
460 459 parsed[b'error_code'],
461 460 parsed[b'error_info'],
462 461 )
463 462 raise error.Abort(msg)
464 463 return parsed[b'result']
465 464
466 465
467 466 @vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
468 467 def debugcallconduit(ui, repo, name):
469 468 """call Conduit API
470 469
471 470 Call parameters are read from stdin as a JSON blob. Result will be written
472 471 to stdout as a JSON blob.
473 472 """
474 473 # json.loads only accepts bytes from 3.6+
475 474 rawparams = encoding.unifromlocal(ui.fin.read())
476 475 # json.loads only returns unicode strings
477 476 params = pycompat.rapply(
478 477 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
479 478 pycompat.json_loads(rawparams),
480 479 )
481 480 # json.dumps only accepts unicode strings
482 481 result = pycompat.rapply(
483 482 lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
484 483 callconduit(ui, name, params),
485 484 )
486 485 s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
487 486 ui.write(b'%s\n' % encoding.unitolocal(s))
488 487
489 488
490 489 def getrepophid(repo):
491 490 """given callsign, return repository PHID or None"""
492 491 # developer config: phabricator.repophid
493 492 repophid = repo.ui.config(b'phabricator', b'repophid')
494 493 if repophid:
495 494 return repophid
496 495 callsign = repo.ui.config(b'phabricator', b'callsign')
497 496 if not callsign:
498 497 return None
499 498 query = callconduit(
500 499 repo.ui,
501 500 b'diffusion.repository.search',
502 501 {b'constraints': {b'callsigns': [callsign]}},
503 502 )
504 503 if len(query[b'data']) == 0:
505 504 return None
506 505 repophid = query[b'data'][0][b'phid']
507 506 repo.ui.setconfig(b'phabricator', b'repophid', repophid)
508 507 return repophid
509 508
510 509
511 510 _differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
512 511 _differentialrevisiondescre = re.compile(
513 512 br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
514 513 )
515 514
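# Illustrative matches for the two patterns above (example values only):
#
#   _differentialrevisiontagre:  tag b'D1234' -> group(1) == b'1234'
#   _differentialrevisiondescre: a description line such as
#     b'Differential Revision: https://phab.example.com/D1234'
#   yields group('url') == b'https://phab.example.com/D1234' and
#   group('id') == b'1234'.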
516 515
517 516 def getoldnodedrevmap(repo, nodelist):
518 517 """find previous nodes that has been sent to Phabricator
519 518
520 519 return {node: (oldnode, Differential diff, Differential Revision ID)}
521 520 for node in nodelist with known previous sent versions, or associated
522 521 Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
523 522 be ``None``.
524 523
525 524 Examines commit messages like "Differential Revision:" to get the
526 525 association information.
527 526
528 527 If no such commit message line is found, examines all precursors and their
529 528 tags. Tags with a format like "D1234" are considered a match; the node
530 529 with that tag, and the number after "D" (ex. 1234), will be returned.
531 530
532 531 The ``old node``, if not None, is guaranteed to be the last diff of the
533 532 corresponding Differential Revision, and to exist in the repo.
534 533 """
535 534 unfi = repo.unfiltered()
536 535 has_node = unfi.changelog.index.has_node
537 536
538 537 result = {} # {node: (oldnode?, lastdiff?, drev)}
539 538 # ordered for test stability when printing new -> old mapping below
540 539 toconfirm = util.sortdict() # {node: (force, {precnode}, drev)}
541 540 for node in nodelist:
542 541 ctx = unfi[node]
543 542 # For tags like "D123", put them into "toconfirm" to verify later
544 543 precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
545 544 for n in precnodes:
546 545 if has_node(n):
547 546 for tag in unfi.nodetags(n):
548 547 m = _differentialrevisiontagre.match(tag)
549 548 if m:
550 549 toconfirm[node] = (0, set(precnodes), int(m.group(1)))
551 550 break
552 551 else:
553 552 continue # move to next predecessor
554 553 break # found a tag, stop
555 554 else:
556 555 # Check commit message
557 556 m = _differentialrevisiondescre.search(ctx.description())
558 557 if m:
559 558 toconfirm[node] = (1, set(precnodes), int(m.group('id')))
560 559
561 560 # Double check if tags are genuine by collecting all old nodes from
562 561 # Phabricator, and expect precursors overlap with it.
563 562 if toconfirm:
564 563 drevs = [drev for force, precs, drev in toconfirm.values()]
565 564 alldiffs = callconduit(
566 565 unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
567 566 )
568 567
569 568 def getnodes(d, precset):
570 569 # Ignore other nodes that were combined into the Differential
571 570 # that aren't predecessors of the current local node.
572 571 return [n for n in getlocalcommits(d) if n in precset]
573 572
574 573 for newnode, (force, precset, drev) in toconfirm.items():
575 574 diffs = [
576 575 d for d in alldiffs.values() if int(d[b'revisionID']) == drev
577 576 ]
578 577
579 578 # local predecessors known by Phabricator
580 579 phprecset = {n for d in diffs for n in getnodes(d, precset)}
581 580
582 581 # Ignore if precursors (Phabricator and local repo) do not overlap,
583 582 # and force is not set (when commit message says nothing)
584 583 if not force and not phprecset:
585 584 tagname = b'D%d' % drev
586 585 tags.tag(
587 586 repo,
588 587 tagname,
589 588 repo.nullid,
590 589 message=None,
591 590 user=None,
592 591 date=None,
593 592 local=True,
594 593 )
595 594 unfi.ui.warn(
596 595 _(
597 596 b'D%d: local tag removed - does not match '
598 597 b'Differential history\n'
599 598 )
600 599 % drev
601 600 )
602 601 continue
603 602
604 603 # Find the last node using Phabricator metadata, and make sure it
605 604 # exists in the repo
606 605 oldnode = lastdiff = None
607 606 if diffs:
608 607 lastdiff = max(diffs, key=lambda d: int(d[b'id']))
609 608 oldnodes = getnodes(lastdiff, precset)
610 609
611 610 _debug(
612 611 unfi.ui,
613 612 b"%s mapped to old nodes %s\n"
614 613 % (
615 614 short(newnode),
616 615 stringutil.pprint([short(n) for n in sorted(oldnodes)]),
617 616 ),
618 617 )
619 618
620 619 # If this commit was the result of `hg fold` after submission,
621 620 # and now resubmitted with --fold, the easiest thing to do is
622 621 # to leave the node clear. This only results in creating a new
623 622 # diff for the _same_ Differential Revision if this commit is
624 623 # the first or last in the selected range. If we picked a node
625 624 # from the list instead, it would have to be the lowest if at
626 625 # the beginning of the --fold range, or the highest at the end.
627 626 # Otherwise, one or more of the nodes wouldn't be considered in
628 627 # the diff, and the Differential wouldn't be properly updated.
629 628 # If this commit is the result of `hg split` in the same
630 629 # scenario, there is a single oldnode here (and multiple
631 630 # newnodes mapped to it). That makes it the same as the normal
632 631 # case, as the edges of the newnode range cleanly map to one
633 632 # oldnode each.
634 633 if len(oldnodes) == 1:
635 634 oldnode = oldnodes[0]
636 635 if oldnode and not has_node(oldnode):
637 636 oldnode = None
638 637
639 638 result[newnode] = (oldnode, lastdiff, drev)
640 639
641 640 return result
642 641
643 642
644 643 def getdrevmap(repo, revs):
645 644 """Return a dict mapping each rev in `revs` to their Differential Revision
646 645 ID or None.
647 646 """
648 647 result = {}
649 648 for rev in revs:
650 649 result[rev] = None
651 650 ctx = repo[rev]
652 651 # Check commit message
653 652 m = _differentialrevisiondescre.search(ctx.description())
654 653 if m:
655 654 result[rev] = int(m.group('id'))
656 655 continue
657 656 # Check tags
658 657 for tag in repo.nodetags(ctx.node()):
659 658 m = _differentialrevisiontagre.match(tag)
660 659 if m:
661 660 result[rev] = int(m.group(1))
662 661 break
663 662
664 663 return result
665 664
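# Illustrative result shape (example values only): getdrevmap(repo, [42, 43])
# could return {42: 1234, 43: None}, i.e. rev 42 carries D1234 metadata while
# rev 43 has no known Differential association.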
666 665
667 666 def getdiff(basectx, ctx, diffopts):
668 667 """plain-text diff without header (user, commit message, etc)"""
669 668 output = util.stringio()
670 669 for chunk, _label in patch.diffui(
671 670 ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts
672 671 ):
673 672 output.write(chunk)
674 673 return output.getvalue()
675 674
676 675
677 676 class DiffChangeType:
678 677 ADD = 1
679 678 CHANGE = 2
680 679 DELETE = 3
681 680 MOVE_AWAY = 4
682 681 COPY_AWAY = 5
683 682 MOVE_HERE = 6
684 683 COPY_HERE = 7
685 684 MULTICOPY = 8
686 685
687 686
688 687 class DiffFileType:
689 688 TEXT = 1
690 689 IMAGE = 2
691 690 BINARY = 3
692 691
693 692
694 693 @attr.s
695 694 class phabhunk(dict):
696 695 """Represents a Differential hunk, which is owned by a Differential change"""
697 696
698 697 oldOffset = attr.ib(default=0) # camelcase-required
699 698 oldLength = attr.ib(default=0) # camelcase-required
700 699 newOffset = attr.ib(default=0) # camelcase-required
701 700 newLength = attr.ib(default=0) # camelcase-required
702 701 corpus = attr.ib(default='')
703 702 # These get added to the phabchange's equivalents
704 703 addLines = attr.ib(default=0) # camelcase-required
705 704 delLines = attr.ib(default=0) # camelcase-required
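    # A rough illustration (not part of the class, offsets and corpus are
    # made-up example values): a one-line replacement hunk might be built as
    #   phabhunk(oldOffset=10, oldLength=1, newOffset=10, newLength=1,
    #            corpus=b'-old line\n+new line\n', addLines=1, delLines=1)
    # and then appended to a phabchange via its addhunk() method.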
706 705
707 706
708 707 @attr.s
709 708 class phabchange:
710 709     """Represents a Differential change; owns Differential hunks and is owned
711 710     by a Differential diff. Each one represents one file in a diff.
712 711 """
713 712
714 713 currentPath = attr.ib(default=None) # camelcase-required
715 714 oldPath = attr.ib(default=None) # camelcase-required
716 715 awayPaths = attr.ib(default=attr.Factory(list)) # camelcase-required
717 716 metadata = attr.ib(default=attr.Factory(dict))
718 717 oldProperties = attr.ib(default=attr.Factory(dict)) # camelcase-required
719 718 newProperties = attr.ib(default=attr.Factory(dict)) # camelcase-required
720 719 type = attr.ib(default=DiffChangeType.CHANGE)
721 720 fileType = attr.ib(default=DiffFileType.TEXT) # camelcase-required
722 721 commitHash = attr.ib(default=None) # camelcase-required
723 722 addLines = attr.ib(default=0) # camelcase-required
724 723 delLines = attr.ib(default=0) # camelcase-required
725 724 hunks = attr.ib(default=attr.Factory(list))
726 725
727 726 def copynewmetadatatoold(self):
728 727 for key in list(self.metadata.keys()):
729 728 newkey = key.replace(b'new:', b'old:')
730 729 self.metadata[newkey] = self.metadata[key]
731 730
732 731 def addoldmode(self, value):
733 732 self.oldProperties[b'unix:filemode'] = value
734 733
735 734 def addnewmode(self, value):
736 735 self.newProperties[b'unix:filemode'] = value
737 736
738 737 def addhunk(self, hunk):
739 738 if not isinstance(hunk, phabhunk):
740 739 raise error.Abort(b'phabchange.addhunk only takes phabhunks')
741 740 self.hunks.append(pycompat.byteskwargs(attr.asdict(hunk)))
742 741 # It's useful to include these stats since the Phab web UI shows them,
743 742 # and uses them to estimate how large a change a Revision is. Also used
744 743 # in email subjects for the [+++--] bit.
745 744 self.addLines += hunk.addLines
746 745 self.delLines += hunk.delLines
747 746
748 747
749 748 @attr.s
750 749 class phabdiff:
751 750 """Represents a Differential diff, owns Differential changes. Corresponds
752 751 to a commit.
753 752 """
754 753
755 754     # There doesn't seem to be any reason to send this (output of uname -n)
756 755 sourceMachine = attr.ib(default=b'') # camelcase-required
757 756 sourcePath = attr.ib(default=b'/') # camelcase-required
758 757 sourceControlBaseRevision = attr.ib(default=b'0' * 40) # camelcase-required
759 758 sourceControlPath = attr.ib(default=b'/') # camelcase-required
760 759 sourceControlSystem = attr.ib(default=b'hg') # camelcase-required
761 760 branch = attr.ib(default=b'default')
762 761 bookmark = attr.ib(default=None)
763 762 creationMethod = attr.ib(default=b'phabsend') # camelcase-required
764 763 lintStatus = attr.ib(default=b'none') # camelcase-required
765 764 unitStatus = attr.ib(default=b'none') # camelcase-required
766 765 changes = attr.ib(default=attr.Factory(dict))
767 766 repositoryPHID = attr.ib(default=None) # camelcase-required
768 767
769 768 def addchange(self, change):
770 769 if not isinstance(change, phabchange):
771 770 raise error.Abort(b'phabdiff.addchange only takes phabchanges')
772 771 self.changes[change.currentPath] = pycompat.byteskwargs(
773 772 attr.asdict(change)
774 773 )
775 774
776 775
777 776 def maketext(pchange, basectx, ctx, fname):
778 777 """populate the phabchange for a text file"""
779 778 repo = ctx.repo()
780 779 fmatcher = match.exact([fname])
781 780 diffopts = mdiff.diffopts(git=True, context=32767)
782 781 _pfctx, _fctx, header, fhunks = next(
783 782 patch.diffhunks(repo, basectx.p1(), ctx, fmatcher, opts=diffopts)
784 783 )
785 784
786 785 for fhunk in fhunks:
787 786 (oldOffset, oldLength, newOffset, newLength), lines = fhunk
788 787 corpus = b''.join(lines[1:])
789 788 shunk = list(header)
790 789 shunk.extend(lines)
791 790 _mf, _mt, addLines, delLines, _hb = patch.diffstatsum(
792 791 patch.diffstatdata(util.iterlines(shunk))
793 792 )
794 793 pchange.addhunk(
795 794 phabhunk(
796 795 oldOffset,
797 796 oldLength,
798 797 newOffset,
799 798 newLength,
800 799 corpus,
801 800 addLines,
802 801 delLines,
803 802 )
804 803 )
805 804
806 805
807 806 def uploadchunks(fctx, fphid):
808 807 """upload large binary files as separate chunks.
809 808     Phabricator requests chunking for files over 8MiB, split into 4MiB chunks
810 809 """
811 810 ui = fctx.repo().ui
812 811 chunks = callconduit(ui, b'file.querychunks', {b'filePHID': fphid})
813 812 with ui.makeprogress(
814 813 _(b'uploading file chunks'), unit=_(b'chunks'), total=len(chunks)
815 814 ) as progress:
816 815 for chunk in chunks:
817 816 progress.increment()
818 817 if chunk[b'complete']:
819 818 continue
820 819 bstart = int(chunk[b'byteStart'])
821 820 bend = int(chunk[b'byteEnd'])
822 821 callconduit(
823 822 ui,
824 823 b'file.uploadchunk',
825 824 {
826 825 b'filePHID': fphid,
827 826 b'byteStart': bstart,
828 827 b'data': base64.b64encode(fctx.data()[bstart:bend]),
829 828 b'dataEncoding': b'base64',
830 829 },
831 830 )
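# Illustration of the chunking above, with assumed sizes: for a 10MiB file,
# file.querychunks returns (byteStart, byteEnd) ranges such as (0, 4MiB),
# (4MiB, 8MiB) and (8MiB, 10MiB); each range not yet marked 'complete' is
# sliced out of fctx.data(), base64-encoded, and sent via file.uploadchunk.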
832 831
833 832
834 833 def uploadfile(fctx):
835 834 """upload binary files to Phabricator"""
836 835 repo = fctx.repo()
837 836 ui = repo.ui
838 837 fname = fctx.path()
839 838 size = fctx.size()
840 839 fhash = pycompat.bytestr(hashlib.sha256(fctx.data()).hexdigest())
841 840
842 841 # an allocate call is required first to see if an upload is even required
843 842 # (Phab might already have it) and to determine if chunking is needed
844 843 allocateparams = {
845 844 b'name': fname,
846 845 b'contentLength': size,
847 846 b'contentHash': fhash,
848 847 }
849 848 filealloc = callconduit(ui, b'file.allocate', allocateparams)
850 849 fphid = filealloc[b'filePHID']
851 850
852 851 if filealloc[b'upload']:
853 852 ui.write(_(b'uploading %s\n') % bytes(fctx))
854 853 if not fphid:
855 854 uploadparams = {
856 855 b'name': fname,
857 856 b'data_base64': base64.b64encode(fctx.data()),
858 857 }
859 858 fphid = callconduit(ui, b'file.upload', uploadparams)
860 859 else:
861 860 uploadchunks(fctx, fphid)
862 861 else:
863 862 ui.debug(b'server already has %s\n' % bytes(fctx))
864 863
865 864 if not fphid:
866 865 raise error.Abort(b'Upload of %s failed.' % bytes(fctx))
867 866
868 867 return fphid
869 868
870 869
871 870 def addoldbinary(pchange, oldfctx, fctx):
872 871 """add the metadata for the previous version of a binary file to the
873 872 phabchange for the new version
874 873
875 874 ``oldfctx`` is the previous version of the file; ``fctx`` is the new
876 875 version of the file, or None if the file is being removed.
877 876 """
878 877 if not fctx or fctx.cmp(oldfctx):
879 878 # Files differ, add the old one
880 879 pchange.metadata[b'old:file:size'] = oldfctx.size()
881 880 mimeguess, _enc = mimetypes.guess_type(
882 881 encoding.unifromlocal(oldfctx.path())
883 882 )
884 883 if mimeguess:
885 884 pchange.metadata[b'old:file:mime-type'] = pycompat.bytestr(
886 885 mimeguess
887 886 )
888 887 fphid = uploadfile(oldfctx)
889 888 pchange.metadata[b'old:binary-phid'] = fphid
890 889 else:
891 890         # If it's left as IMAGE/BINARY, the web UI might try to display it
892 891 pchange.fileType = DiffFileType.TEXT
893 892 pchange.copynewmetadatatoold()
894 893
895 894
896 895 def makebinary(pchange, fctx):
897 896 """populate the phabchange for a binary file"""
898 897 pchange.fileType = DiffFileType.BINARY
899 898 fphid = uploadfile(fctx)
900 899 pchange.metadata[b'new:binary-phid'] = fphid
901 900 pchange.metadata[b'new:file:size'] = fctx.size()
902 901 mimeguess, _enc = mimetypes.guess_type(encoding.unifromlocal(fctx.path()))
903 902 if mimeguess:
904 903 mimeguess = pycompat.bytestr(mimeguess)
905 904 pchange.metadata[b'new:file:mime-type'] = mimeguess
906 905 if mimeguess.startswith(b'image/'):
907 906 pchange.fileType = DiffFileType.IMAGE
908 907
909 908
910 909 # Copied from mercurial/patch.py
911 910 gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
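# i.e. symlinks map to b'120000', executable files to b'100755', and regular
# files (no flag) to b'100644', mirroring git's file-mode encoding.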
912 911
913 912
914 913 def notutf8(fctx):
915 914 """detect non-UTF-8 text files since Phabricator requires them to be marked
916 915 as binary
917 916 """
918 917 try:
919 918 fctx.data().decode('utf-8')
920 919 return False
921 920 except UnicodeDecodeError:
922 921 fctx.repo().ui.write(
923 922 _(b'file %s detected as non-UTF-8, marked as binary\n')
924 923 % fctx.path()
925 924 )
926 925 return True
927 926
928 927
929 928 def addremoved(pdiff, basectx, ctx, removed):
930 929 """add removed files to the phabdiff. Shouldn't include moves"""
931 930 for fname in removed:
932 931 pchange = phabchange(
933 932 currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE
934 933 )
935 934 oldfctx = basectx.p1()[fname]
936 935 pchange.addoldmode(gitmode[oldfctx.flags()])
937 936 if not (oldfctx.isbinary() or notutf8(oldfctx)):
938 937 maketext(pchange, basectx, ctx, fname)
939 938
940 939 pdiff.addchange(pchange)
941 940
942 941
943 942 def addmodified(pdiff, basectx, ctx, modified):
944 943 """add modified files to the phabdiff"""
945 944 for fname in modified:
946 945 fctx = ctx[fname]
947 946 oldfctx = basectx.p1()[fname]
948 947 pchange = phabchange(currentPath=fname, oldPath=fname)
949 948 filemode = gitmode[fctx.flags()]
950 949 originalmode = gitmode[oldfctx.flags()]
951 950 if filemode != originalmode:
952 951 pchange.addoldmode(originalmode)
953 952 pchange.addnewmode(filemode)
954 953
955 954 if (
956 955 fctx.isbinary()
957 956 or notutf8(fctx)
958 957 or oldfctx.isbinary()
959 958 or notutf8(oldfctx)
960 959 ):
961 960 makebinary(pchange, fctx)
962 961 addoldbinary(pchange, oldfctx, fctx)
963 962 else:
964 963 maketext(pchange, basectx, ctx, fname)
965 964
966 965 pdiff.addchange(pchange)
967 966
968 967
969 968 def addadded(pdiff, basectx, ctx, added, removed):
970 969 """add file adds to the phabdiff, both new files and copies/moves"""
971 970     # Keep track of files that have been recorded as moved/copied, so if there
972 971     # are additional copies we can mark them (moves get removed from `removed`)
973 972 copiedchanges = {}
974 973 movedchanges = {}
975 974
976 975 copy = {}
977 976 if basectx != ctx:
978 977 copy = copies.pathcopies(basectx.p1(), ctx)
979 978
980 979 for fname in added:
981 980 fctx = ctx[fname]
982 981 oldfctx = None
983 982 pchange = phabchange(currentPath=fname)
984 983
985 984 filemode = gitmode[fctx.flags()]
986 985
987 986 if copy:
988 987 originalfname = copy.get(fname, fname)
989 988 else:
990 989 originalfname = fname
991 990 if fctx.renamed():
992 991 originalfname = fctx.renamed()[0]
993 992
994 993 renamed = fname != originalfname
995 994
996 995 if renamed:
997 996 oldfctx = basectx.p1()[originalfname]
998 997 originalmode = gitmode[oldfctx.flags()]
999 998 pchange.oldPath = originalfname
1000 999
1001 1000 if originalfname in removed:
1002 1001 origpchange = phabchange(
1003 1002 currentPath=originalfname,
1004 1003 oldPath=originalfname,
1005 1004 type=DiffChangeType.MOVE_AWAY,
1006 1005 awayPaths=[fname],
1007 1006 )
1008 1007 movedchanges[originalfname] = origpchange
1009 1008 removed.remove(originalfname)
1010 1009 pchange.type = DiffChangeType.MOVE_HERE
1011 1010 elif originalfname in movedchanges:
1012 1011 movedchanges[originalfname].type = DiffChangeType.MULTICOPY
1013 1012 movedchanges[originalfname].awayPaths.append(fname)
1014 1013 pchange.type = DiffChangeType.COPY_HERE
1015 1014 else: # pure copy
1016 1015 if originalfname not in copiedchanges:
1017 1016 origpchange = phabchange(
1018 1017 currentPath=originalfname, type=DiffChangeType.COPY_AWAY
1019 1018 )
1020 1019 copiedchanges[originalfname] = origpchange
1021 1020 else:
1022 1021 origpchange = copiedchanges[originalfname]
1023 1022 origpchange.awayPaths.append(fname)
1024 1023 pchange.type = DiffChangeType.COPY_HERE
1025 1024
1026 1025 if filemode != originalmode:
1027 1026 pchange.addoldmode(originalmode)
1028 1027 pchange.addnewmode(filemode)
1029 1028 else: # Brand-new file
1030 1029 pchange.addnewmode(gitmode[fctx.flags()])
1031 1030 pchange.type = DiffChangeType.ADD
1032 1031
1033 1032 if (
1034 1033 fctx.isbinary()
1035 1034 or notutf8(fctx)
1036 1035 or (oldfctx and (oldfctx.isbinary() or notutf8(oldfctx)))
1037 1036 ):
1038 1037 makebinary(pchange, fctx)
1039 1038 if renamed:
1040 1039 addoldbinary(pchange, oldfctx, fctx)
1041 1040 else:
1042 1041 maketext(pchange, basectx, ctx, fname)
1043 1042
1044 1043 pdiff.addchange(pchange)
1045 1044
1046 1045 for _path, copiedchange in copiedchanges.items():
1047 1046 pdiff.addchange(copiedchange)
1048 1047 for _path, movedchange in movedchanges.items():
1049 1048 pdiff.addchange(movedchange)
1050 1049
1051 1050
1052 1051 def creatediff(basectx, ctx):
1053 1052 """create a Differential Diff"""
1054 1053 repo = ctx.repo()
1055 1054 repophid = getrepophid(repo)
1056 1055 # Create a "Differential Diff" via "differential.creatediff" API
1057 1056 pdiff = phabdiff(
1058 1057 sourceControlBaseRevision=b'%s' % basectx.p1().hex(),
1059 1058 branch=b'%s' % ctx.branch(),
1060 1059 )
1061 1060 modified, added, removed, _d, _u, _i, _c = basectx.p1().status(ctx)
1062 1061 # addadded will remove moved files from removed, so addremoved won't get
1063 1062 # them
1064 1063 addadded(pdiff, basectx, ctx, added, removed)
1065 1064 addmodified(pdiff, basectx, ctx, modified)
1066 1065 addremoved(pdiff, basectx, ctx, removed)
1067 1066 if repophid:
1068 1067 pdiff.repositoryPHID = repophid
1069 1068 diff = callconduit(
1070 1069 repo.ui,
1071 1070 b'differential.creatediff',
1072 1071 pycompat.byteskwargs(attr.asdict(pdiff)),
1073 1072 )
1074 1073 if not diff:
1075 1074 if basectx != ctx:
1076 1075 msg = _(b'cannot create diff for %s::%s') % (basectx, ctx)
1077 1076 else:
1078 1077 msg = _(b'cannot create diff for %s') % ctx
1079 1078 raise error.Abort(msg)
1080 1079 return diff
1081 1080
1082 1081
1083 1082 def writediffproperties(ctxs, diff):
1084 1083     """write metadata to the diff so patches can be applied losslessly
1085 1084
1086 1085 ``ctxs`` is the list of commits that created the diff, in ascending order.
1087 1086 The list is generally a single commit, but may be several when using
1088 1087 ``phabsend --fold``.
1089 1088 """
1090 1089     # creatediff returns a `diffid` key, but query returns an `id` key
1091 1090 diffid = diff.get(b'diffid', diff.get(b'id'))
1092 1091 basectx = ctxs[0]
1093 1092 tipctx = ctxs[-1]
1094 1093
1095 1094 params = {
1096 1095 b'diff_id': diffid,
1097 1096 b'name': b'hg:meta',
1098 1097 b'data': templatefilters.json(
1099 1098 {
1100 1099 b'user': tipctx.user(),
1101 1100 b'date': b'%d %d' % tipctx.date(),
1102 1101 b'branch': tipctx.branch(),
1103 1102 b'node': tipctx.hex(),
1104 1103 b'parent': basectx.p1().hex(),
1105 1104 }
1106 1105 ),
1107 1106 }
1108 1107 callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)
1109 1108
1110 1109 commits = {}
1111 1110 for ctx in ctxs:
1112 1111 commits[ctx.hex()] = {
1113 1112 b'author': stringutil.person(ctx.user()),
1114 1113 b'authorEmail': stringutil.email(ctx.user()),
1115 1114 b'time': int(ctx.date()[0]),
1116 1115 b'commit': ctx.hex(),
1117 1116 b'parents': [ctx.p1().hex()],
1118 1117 b'branch': ctx.branch(),
1119 1118 }
1120 1119 params = {
1121 1120 b'diff_id': diffid,
1122 1121 b'name': b'local:commits',
1123 1122 b'data': templatefilters.json(commits),
1124 1123 }
1125 1124 callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)
1126 1125
1127 1126
1128 1127 def createdifferentialrevision(
1129 1128 ctxs,
1130 1129 revid=None,
1131 1130 parentrevphid=None,
1132 1131 oldbasenode=None,
1133 1132 oldnode=None,
1134 1133 olddiff=None,
1135 1134 actions=None,
1136 1135 comment=None,
1137 1136 ):
1138 1137 """create or update a Differential Revision
1139 1138
1140 1139 If revid is None, create a new Differential Revision, otherwise update
1141 1140 revid. If parentrevphid is not None, set it as a dependency.
1142 1141
1143 1142 If there is a single commit for the new Differential Revision, ``ctxs`` will
1144 1143 be a list of that single context. Otherwise, it is a list that covers the
1145 1144 range of changes for the differential, where ``ctxs[0]`` is the first change
1146 1145 to include and ``ctxs[-1]`` is the last.
1147 1146
1148 1147 If oldnode is not None, check if the patch content (without commit message
1149 1148 and metadata) has changed before creating another diff. For a Revision with
1150 1149 a single commit, ``oldbasenode`` and ``oldnode`` have the same value. For a
1151 1150 Revision covering multiple commits, ``oldbasenode`` corresponds to
1152 1151 ``ctxs[0]`` the previous time this Revision was posted, and ``oldnode``
1153 1152 corresponds to ``ctxs[-1]``.
1154 1153
1155 1154 If actions is not None, they will be appended to the transaction.
1156 1155 """
1157 1156 ctx = ctxs[-1]
1158 1157 basectx = ctxs[0]
1159 1158
1160 1159 repo = ctx.repo()
1161 1160 if oldnode:
1162 1161 diffopts = mdiff.diffopts(git=True, context=32767)
1163 1162 unfi = repo.unfiltered()
1164 1163 oldctx = unfi[oldnode]
1165 1164 oldbasectx = unfi[oldbasenode]
1166 1165 neednewdiff = getdiff(basectx, ctx, diffopts) != getdiff(
1167 1166 oldbasectx, oldctx, diffopts
1168 1167 )
1169 1168 else:
1170 1169 neednewdiff = True
1171 1170
1172 1171 transactions = []
1173 1172 if neednewdiff:
1174 1173 diff = creatediff(basectx, ctx)
1175 1174 transactions.append({b'type': b'update', b'value': diff[b'phid']})
1176 1175 if comment:
1177 1176 transactions.append({b'type': b'comment', b'value': comment})
1178 1177 else:
1179 1178         # Even if we don't need to upload a new diff because the patch content
1180 1179         # does not change, we might still need to update its metadata so
1181 1180         # pushers know the correct node metadata.
1182 1181 assert olddiff
1183 1182 diff = olddiff
1184 1183 writediffproperties(ctxs, diff)
1185 1184
1186 1185     # Set the parent Revision every time, so commit re-ordering is picked up
1187 1186 if parentrevphid:
1188 1187 transactions.append(
1189 1188 {b'type': b'parents.set', b'value': [parentrevphid]}
1190 1189 )
1191 1190
1192 1191 if actions:
1193 1192 transactions += actions
1194 1193
1195 1194 # When folding multiple local commits into a single review, arcanist will
1196 1195 # take the summary line of the first commit as the title, and then
1197 1196     # concatenate the remaining messages (including each of their first
1198 1197     # lines) to the rest of the first commit message (each separated by
1199 1198     # an empty line), and use that as the summary field. Do the same here.
1200 1199 # For commits with only a one line message, there is no summary field, as
1201 1200 # this gets assigned to the title.
1202 1201 fields = util.sortdict() # sorted for stable wire protocol in tests
1203 1202
1204 1203 for i, _ctx in enumerate(ctxs):
1205 1204 # Parse commit message and update related fields.
1206 1205 desc = _ctx.description()
1207 1206 info = callconduit(
1208 1207 repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
1209 1208 )
1210 1209
1211 1210 for k in [b'title', b'summary', b'testPlan']:
1212 1211 v = info[b'fields'].get(k)
1213 1212 if not v:
1214 1213 continue
1215 1214
1216 1215 if i == 0:
1217 1216 # Title, summary and test plan (if present) are taken verbatim
1218 1217 # for the first commit.
1219 1218 fields[k] = v.rstrip()
1220 1219 continue
1221 1220 elif k == b'title':
1222 1221 # Add subsequent titles (i.e. the first line of the commit
1223 1222 # message) back to the summary.
1224 1223 k = b'summary'
1225 1224
1226 1225 # Append any current field to the existing composite field
1227 1226 fields[k] = b'\n\n'.join(filter(None, [fields.get(k), v.rstrip()]))
1228 1227
1229 1228 for k, v in fields.items():
1230 1229 transactions.append({b'type': k, b'value': v})
1231 1230
1232 1231 params = {b'transactions': transactions}
1233 1232 if revid is not None:
1234 1233 # Update an existing Differential Revision
1235 1234 params[b'objectIdentifier'] = revid
1236 1235
1237 1236 revision = callconduit(repo.ui, b'differential.revision.edit', params)
1238 1237 if not revision:
1239 1238 if len(ctxs) == 1:
1240 1239 msg = _(b'cannot create revision for %s') % ctx
1241 1240 else:
1242 1241 msg = _(b'cannot create revision for %s::%s') % (basectx, ctx)
1243 1242 raise error.Abort(msg)
1244 1243
1245 1244 return revision, diff
1246 1245
1247 1246
1248 1247 def userphids(ui, names):
1249 1248 """convert user names to PHIDs"""
1250 1249 names = [name.lower() for name in names]
1251 1250 query = {b'constraints': {b'usernames': names}}
1252 1251 result = callconduit(ui, b'user.search', query)
1253 1252     # A username that is not found is not an API error, so check whether we
1254 1253     # have missed any names here.
1255 1254 data = result[b'data']
1256 1255 resolved = {entry[b'fields'][b'username'].lower() for entry in data}
1257 1256 unresolved = set(names) - resolved
1258 1257 if unresolved:
1259 1258 raise error.Abort(
1260 1259 _(b'unknown username: %s') % b' '.join(sorted(unresolved))
1261 1260 )
1262 1261 return [entry[b'phid'] for entry in data]
1263 1262
1264 1263
1265 1264 def _print_phabsend_action(ui, ctx, newrevid, action):
1266 1265 """print the ``action`` that occurred when posting ``ctx`` for review
1267 1266
1268 1267 This is a utility function for the sending phase of ``phabsend``, which
1269 1268     makes it easier to show a status for all local commits with ``--fold``.
1270 1269 """
1271 1270 actiondesc = ui.label(
1272 1271 {
1273 1272 b'created': _(b'created'),
1274 1273 b'skipped': _(b'skipped'),
1275 1274 b'updated': _(b'updated'),
1276 1275 }[action],
1277 1276 b'phabricator.action.%s' % action,
1278 1277 )
1279 1278 drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
1280 1279 summary = cmdutil.format_changeset_summary(ui, ctx, b'phabsend')
1281 1280 ui.write(_(b'%s - %s - %s\n') % (drevdesc, actiondesc, summary))
1282 1281
1283 1282
1284 1283 def _amend_diff_properties(unfi, drevid, newnodes, diff):
1285 1284 """update the local commit list for the ``diff`` associated with ``drevid``
1286 1285
1287 1286 This is a utility function for the amend phase of ``phabsend``, which
1288 1287 converts failures to warning messages.
1289 1288 """
1290 1289 _debug(
1291 1290 unfi.ui,
1292 1291 b"new commits: %s\n" % stringutil.pprint([short(n) for n in newnodes]),
1293 1292 )
1294 1293
1295 1294 try:
1296 1295 writediffproperties([unfi[newnode] for newnode in newnodes], diff)
1297 1296 except util.urlerr.urlerror:
1298 1297 # If it fails just warn and keep going, otherwise the DREV
1299 1298 # associations will be lost
1300 1299 unfi.ui.warnnoi18n(b'Failed to update metadata for D%d\n' % drevid)
1301 1300
1302 1301
1303 1302 @vcrcommand(
1304 1303 b'phabsend',
1305 1304 [
1306 1305 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
1307 1306 (b'', b'amend', True, _(b'update commit messages')),
1308 1307 (b'', b'reviewer', [], _(b'specify reviewers')),
1309 1308 (b'', b'blocker', [], _(b'specify blocking reviewers')),
1310 1309 (
1311 1310 b'm',
1312 1311 b'comment',
1313 1312 b'',
1314 1313 _(b'add a comment to Revisions with new/updated Diffs'),
1315 1314 ),
1316 1315 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
1317 1316 (b'', b'fold', False, _(b'combine the revisions into one review')),
1318 1317 ],
1319 1318 _(b'REV [OPTIONS]'),
1320 1319 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1321 1320 )
1322 1321 def phabsend(ui, repo, *revs, **opts):
1323 1322 """upload changesets to Phabricator
1324 1323
1325 1324     If multiple revisions are specified, they will be sent as a stack
1326 1325     with a linear dependency relationship, using the order specified by the
1327 1326     revset.
1328 1327
1329 1328     When changesets are uploaded for the first time, local tags will be created
1330 1329     to maintain the association. After the first time, phabsend will check the
1331 1330     obsstore and tag information so it can figure out whether to update an
1332 1331     existing Differential Revision, or create a new one.
1333 1332
1334 1333 If --amend is set, update commit messages so they have the
1335 1334     ``Differential Revision`` URL, and remove the related tags. This is similar
1336 1335     to what arcanist does, and is preferable in author-push workflows. Otherwise,
1337 1336 use local tags to record the ``Differential Revision`` association.
1338 1337
1339 1338 The --confirm option lets you confirm changesets before sending them. You
1340 1339     can also add the following to your configuration file to make it the
1341 1340     default behaviour::
1342 1341
1343 1342 [phabsend]
1344 1343 confirm = true
1345 1344
1346 1345 By default, a separate review will be created for each commit that is
1347 1346 selected, and will have the same parent/child relationship in Phabricator.
1348 1347 If ``--fold`` is set, multiple commits are rolled up into a single review
1349 1348 as if diffed from the parent of the first revision to the last. The commit
1350 1349 messages are concatenated in the summary field on Phabricator.
1351 1350
1352 1351 phabsend will check obsstore and the above association to decide whether to
1353 1352 update an existing Differential Revision, or create a new one.
1354 1353 """
1355 1354 opts = pycompat.byteskwargs(opts)
1356 1355 revs = list(revs) + opts.get(b'rev', [])
1357 1356 revs = logcmdutil.revrange(repo, revs)
1358 1357 revs.sort() # ascending order to preserve topological parent/child in phab
1359 1358
1360 1359 if not revs:
1361 1360 raise error.Abort(_(b'phabsend requires at least one changeset'))
1362 1361 if opts.get(b'amend'):
1363 1362 cmdutil.checkunfinished(repo)
1364 1363
1365 1364 ctxs = [repo[rev] for rev in revs]
1366 1365
1367 1366 if any(c for c in ctxs if c.obsolete()):
1368 1367 raise error.Abort(_(b"obsolete commits cannot be posted for review"))
1369 1368
1370 1369 # Ensure the local commits are an unbroken range. The semantics of the
1371 1370 # --fold option implies this, and the auto restacking of orphans requires
1372 1371 # it. Otherwise A+C in A->B->C will cause B to be orphaned, and C' to
1373 1372 # get A' as a parent.
1374 1373 def _fail_nonlinear_revs(revs, revtype):
1375 1374 badnodes = [repo[r].node() for r in revs]
1376 1375 raise error.Abort(
1377 1376 _(b"cannot phabsend multiple %s revisions: %s")
1378 1377 % (revtype, scmutil.nodesummaries(repo, badnodes)),
1379 1378 hint=_(b"the revisions must form a linear chain"),
1380 1379 )
1381 1380
1382 1381 heads = repo.revs(b'heads(%ld)', revs)
1383 1382 if len(heads) > 1:
1384 1383 _fail_nonlinear_revs(heads, b"head")
1385 1384
1386 1385 roots = repo.revs(b'roots(%ld)', revs)
1387 1386 if len(roots) > 1:
1388 1387 _fail_nonlinear_revs(roots, b"root")
1389 1388
1390 1389 fold = opts.get(b'fold')
1391 1390 if fold:
1392 1391 if len(revs) == 1:
1393 1392 # TODO: just switch to --no-fold instead?
1394 1393 raise error.Abort(_(b"cannot fold a single revision"))
1395 1394
1396 1395 # There's no clear way to manage multiple commits with a Dxxx tag, so
1397 1396 # require the amend option. (We could append "_nnn", but then it
1398 1397 # becomes jumbled if earlier commits are added to an update.) It should
1399 1398 # lock the repo and ensure that the range is editable, but that would
1400 1399 # make the code pretty convoluted. The default behavior of `arc` is to
1401 1400 # create a new review anyway.
1402 1401 if not opts.get(b"amend"):
1403 1402 raise error.Abort(_(b"cannot fold with --no-amend"))
1404 1403
1405 1404 # It might be possible to bucketize the revisions by the DREV value, and
1406 1405 # iterate over those groups when posting, and then again when amending.
1407 1406 # But for simplicity, require all selected revisions to be for the same
1408 1407 # DREV (if present). Adding local revisions to an existing DREV is
1409 1408 # acceptable.
1410 1409 drevmatchers = [
1411 1410 _differentialrevisiondescre.search(ctx.description())
1412 1411 for ctx in ctxs
1413 1412 ]
1414 1413 if len({m.group('url') for m in drevmatchers if m}) > 1:
1415 1414 raise error.Abort(
1416 1415 _(b"cannot fold revisions with different DREV values")
1417 1416 )
1418 1417
1419 1418     # {newnode: (oldnode, olddiff, olddrev)}
1420 1419 oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])
1421 1420
1422 1421 confirm = ui.configbool(b'phabsend', b'confirm')
1423 1422 confirm |= bool(opts.get(b'confirm'))
1424 1423 if confirm:
1425 1424 confirmed = _confirmbeforesend(repo, revs, oldmap)
1426 1425 if not confirmed:
1427 1426 raise error.Abort(_(b'phabsend cancelled'))
1428 1427
1429 1428 actions = []
1430 1429 reviewers = opts.get(b'reviewer', [])
1431 1430 blockers = opts.get(b'blocker', [])
1432 1431 phids = []
1433 1432 if reviewers:
1434 1433 phids.extend(userphids(repo.ui, reviewers))
1435 1434 if blockers:
1436 1435 phids.extend(
1437 1436 map(
1438 1437 lambda phid: b'blocking(%s)' % phid,
1439 1438 userphids(repo.ui, blockers),
1440 1439 )
1441 1440 )
1442 1441 if phids:
1443 1442 actions.append({b'type': b'reviewers.add', b'value': phids})
1444 1443
1445 1444 drevids = [] # [int]
1446 1445 diffmap = {} # {newnode: diff}
1447 1446
1448 1447 # Send patches one by one so we know their Differential Revision PHIDs and
1449 1448     # can provide the dependency relationship
1450 1449 lastrevphid = None
1451 1450 for ctx in ctxs:
1452 1451 if fold:
1453 1452 ui.debug(b'sending rev %d::%d\n' % (ctx.rev(), ctxs[-1].rev()))
1454 1453 else:
1455 1454 ui.debug(b'sending rev %d\n' % ctx.rev())
1456 1455
1457 1456 # Get Differential Revision ID
1458 1457 oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
1459 1458 oldbasenode, oldbasediff, oldbaserevid = oldnode, olddiff, revid
1460 1459
1461 1460 if fold:
1462 1461 oldbasenode, oldbasediff, oldbaserevid = oldmap.get(
1463 1462 ctxs[-1].node(), (None, None, None)
1464 1463 )
1465 1464
1466 1465 if oldnode != ctx.node() or opts.get(b'amend'):
1467 1466 # Create or update Differential Revision
1468 1467 revision, diff = createdifferentialrevision(
1469 1468 ctxs if fold else [ctx],
1470 1469 revid,
1471 1470 lastrevphid,
1472 1471 oldbasenode,
1473 1472 oldnode,
1474 1473 olddiff,
1475 1474 actions,
1476 1475 opts.get(b'comment'),
1477 1476 )
1478 1477
1479 1478 if fold:
1480 1479 for ctx in ctxs:
1481 1480 diffmap[ctx.node()] = diff
1482 1481 else:
1483 1482 diffmap[ctx.node()] = diff
1484 1483
1485 1484 newrevid = int(revision[b'object'][b'id'])
1486 1485 newrevphid = revision[b'object'][b'phid']
1487 1486 if revid:
1488 1487 action = b'updated'
1489 1488 else:
1490 1489 action = b'created'
1491 1490
1492 1491             # Create a local tag to note the association, if the commit message
1493 1492             # does not have it already
1494 1493 if not fold:
1495 1494 m = _differentialrevisiondescre.search(ctx.description())
1496 1495 if not m or int(m.group('id')) != newrevid:
1497 1496 tagname = b'D%d' % newrevid
1498 1497 tags.tag(
1499 1498 repo,
1500 1499 tagname,
1501 1500 ctx.node(),
1502 1501 message=None,
1503 1502 user=None,
1504 1503 date=None,
1505 1504 local=True,
1506 1505 )
1507 1506 else:
1508 1507             # Nothing changed. But still set "newrevphid" so the next revision
1509 1508             # can depend on this one, and "newrevid" for the summary line.
1510 1509 newrevphid = querydrev(repo.ui, b'%d' % revid)[0][b'phid']
1511 1510 newrevid = revid
1512 1511 action = b'skipped'
1513 1512
1514 1513 drevids.append(newrevid)
1515 1514 lastrevphid = newrevphid
1516 1515
1517 1516 if fold:
1518 1517 for c in ctxs:
1519 1518 if oldmap.get(c.node(), (None, None, None))[2]:
1520 1519 action = b'updated'
1521 1520 else:
1522 1521 action = b'created'
1523 1522 _print_phabsend_action(ui, c, newrevid, action)
1524 1523 break
1525 1524
1526 1525 _print_phabsend_action(ui, ctx, newrevid, action)
1527 1526
1528 1527 # Update commit messages and remove tags
1529 1528 if opts.get(b'amend'):
1530 1529 unfi = repo.unfiltered()
1531 1530 drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
1532 1531 with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
1533 1532 # Eagerly evaluate commits to restabilize before creating new
1534 1533 # commits. The selected revisions are excluded because they are
1535 1534 # automatically restacked as part of the submission process.
1536 1535 restack = [
1537 1536 c
1538 1537 for c in repo.set(
1539 1538 b"(%ld::) - (%ld) - unstable() - obsolete() - public()",
1540 1539 revs,
1541 1540 revs,
1542 1541 )
1543 1542 ]
1544 1543 wnode = unfi[b'.'].node()
1545 1544 mapping = {} # {oldnode: [newnode]}
1546 1545 newnodes = []
1547 1546
1548 1547 drevid = drevids[0]
1549 1548
1550 1549 for i, rev in enumerate(revs):
1551 1550 old = unfi[rev]
1552 1551 if not fold:
1553 1552 drevid = drevids[i]
1554 1553 drev = [d for d in drevs if int(d[b'id']) == drevid][0]
1555 1554
1556 1555 newdesc = get_amended_desc(drev, old, fold)
1557 1556             # Make sure the commit message contains "Differential Revision"
1558 1557 if (
1559 1558 old.description() != newdesc
1560 1559 or old.p1().node() in mapping
1561 1560 or old.p2().node() in mapping
1562 1561 ):
1563 1562 if old.phase() == phases.public:
1564 1563 ui.warn(
1565 1564 _(b"warning: not updating public commit %s\n")
1566 1565 % scmutil.formatchangeid(old)
1567 1566 )
1568 1567 continue
1569 1568 parents = [
1570 1569 mapping.get(old.p1().node(), (old.p1(),))[0],
1571 1570 mapping.get(old.p2().node(), (old.p2(),))[0],
1572 1571 ]
1573 1572 newdesc = rewriteutil.update_hash_refs(
1574 1573 repo,
1575 1574 newdesc,
1576 1575 mapping,
1577 1576 )
1578 1577 new = context.metadataonlyctx(
1579 1578 repo,
1580 1579 old,
1581 1580 parents=parents,
1582 1581 text=newdesc,
1583 1582 user=old.user(),
1584 1583 date=old.date(),
1585 1584 extra=old.extra(),
1586 1585 )
1587 1586
1588 1587 newnode = new.commit()
1589 1588
1590 1589 mapping[old.node()] = [newnode]
1591 1590
1592 1591 if fold:
1593 1592 # Defer updating the (single) Diff until all nodes are
1594 1593 # collected. No tags were created, so none need to be
1595 1594 # removed.
1596 1595 newnodes.append(newnode)
1597 1596 continue
1598 1597
1599 1598 _amend_diff_properties(
1600 1599 unfi, drevid, [newnode], diffmap[old.node()]
1601 1600 )
1602 1601
1603 1602                 # Remove the local tag since it's no longer necessary
1604 1603 tagname = b'D%d' % drevid
1605 1604 if tagname in repo.tags():
1606 1605 tags.tag(
1607 1606 repo,
1608 1607 tagname,
1609 1608 repo.nullid,
1610 1609 message=None,
1611 1610 user=None,
1612 1611 date=None,
1613 1612 local=True,
1614 1613 )
1615 1614 elif fold:
1616 1615 # When folding multiple commits into one review with
1617 1616 # --fold, track even the commits that weren't amended, so
1618 1617 # that their association isn't lost if the properties are
1619 1618 # rewritten below.
1620 1619 newnodes.append(old.node())
1621 1620
1622 1621 # If the submitted commits are public, no amend takes place so
1623 1622 # there are no newnodes and therefore no diff update to do.
1624 1623 if fold and newnodes:
1625 1624 diff = diffmap[old.node()]
1626 1625
1627 1626                 # The diff object in diffmap doesn't have the local commits,
1628 1627                 # because it may have been returned by differential.creatediff
1629 1628                 # rather than differential.querydiffs. So use the queried diff
1630 1629                 # (if present), or force the amend (a new revision is being posted).
1631 1630 if not olddiff or set(newnodes) != getlocalcommits(olddiff):
1632 1631 _debug(ui, b"updating local commit list for D%d\n" % drevid)
1633 1632 _amend_diff_properties(unfi, drevid, newnodes, diff)
1634 1633 else:
1635 1634 _debug(
1636 1635 ui,
1637 1636 b"local commit list for D%d is already up-to-date\n"
1638 1637 % drevid,
1639 1638 )
1640 1639 elif fold:
1641 1640 _debug(ui, b"no newnodes to update\n")
1642 1641
1643 1642 # Restack any children of first-time submissions that were orphaned
1644 1643 # in the process. The ctx won't report that it is an orphan until
1645 1644 # the cleanup takes place below.
1646 1645 for old in restack:
1647 1646 parents = [
1648 1647 mapping.get(old.p1().node(), (old.p1(),))[0],
1649 1648 mapping.get(old.p2().node(), (old.p2(),))[0],
1650 1649 ]
1651 1650 new = context.metadataonlyctx(
1652 1651 repo,
1653 1652 old,
1654 1653 parents=parents,
1655 1654 text=rewriteutil.update_hash_refs(
1656 1655 repo, old.description(), mapping
1657 1656 ),
1658 1657 user=old.user(),
1659 1658 date=old.date(),
1660 1659 extra=old.extra(),
1661 1660 )
1662 1661
1663 1662 newnode = new.commit()
1664 1663
1665 1664 # Don't obsolete unselected descendants of nodes that have not
1666 1665             # been changed in this transaction; that results in an error.
1667 1666 if newnode != old.node():
1668 1667 mapping[old.node()] = [newnode]
1669 1668 _debug(
1670 1669 ui,
1671 1670 b"restabilizing %s as %s\n"
1672 1671 % (short(old.node()), short(newnode)),
1673 1672 )
1674 1673 else:
1675 1674 _debug(
1676 1675 ui,
1677 1676 b"not restabilizing unchanged %s\n" % short(old.node()),
1678 1677 )
1679 1678
1680 1679 scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
1681 1680 if wnode in mapping:
1682 1681 unfi.setparents(mapping[wnode][0])
1683 1682
1684 1683
1685 1684 # Map from "hg:meta" keys to headers understood by "hg import". The order is
1686 1685 # consistent with "hg export" output.
1687 1686 _metanamemap = util.sortdict(
1688 1687 [
1689 1688 (b'user', b'User'),
1690 1689 (b'date', b'Date'),
1691 1690 (b'branch', b'Branch'),
1692 1691 (b'node', b'Node ID'),
1693 1692 (b'parent', b'Parent '),
1694 1693 ]
1695 1694 )
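# For illustration, readpatch() below renders these as "hg import" headers,
# e.g. (using the sample values from the getdiffmeta() docstring):
#   # User Foo Bar <foo@example.com>
#   # Date 1499571514 25200
#   # Branch default
#   # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
#   # Parent  6d0abad76b30e4724a37ab8721d630394070fe16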
1696 1695
1697 1696
1698 1697 def _confirmbeforesend(repo, revs, oldmap):
1699 1698 url, token = readurltoken(repo.ui)
1700 1699 ui = repo.ui
1701 1700 for rev in revs:
1702 1701 ctx = repo[rev]
1703 1702 oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
1704 1703 if drevid:
1705 1704 drevdesc = ui.label(b'D%d' % drevid, b'phabricator.drev')
1706 1705 else:
1707 1706 drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')
1708 1707
1709 1708 ui.write(
1710 1709 _(b'%s - %s\n')
1711 1710 % (
1712 1711 drevdesc,
1713 1712 cmdutil.format_changeset_summary(ui, ctx, b'phabsend'),
1714 1713 )
1715 1714 )
1716 1715
1717 1716 if ui.promptchoice(
1718 1717 _(b'Send the above changes to %s (Y/n)?$$ &Yes $$ &No') % url
1719 1718 ):
1720 1719 return False
1721 1720
1722 1721 return True
1723 1722
1724 1723
1725 1724 _knownstatusnames = {
1726 1725 b'accepted',
1727 1726 b'needsreview',
1728 1727 b'needsrevision',
1729 1728 b'closed',
1730 1729 b'abandoned',
1731 1730 b'changesplanned',
1732 1731 }
1733 1732
1734 1733
1735 1734 def _getstatusname(drev):
1736 1735 """get normalized status name from a Differential Revision"""
1737 1736 return drev[b'statusName'].replace(b' ', b'').lower()
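# e.g. a drev with statusName "Needs Review" normalizes to b'needsreview',
# one of the _knownstatusnames above.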
1738 1737
1739 1738
1740 1739 # Small language to specify differential revisions. Supported symbols: (),
1741 1740 # :X, +, -, and &.
1742 1741
1743 1742 _elements = {
1744 1743 # token-type: binding-strength, primary, prefix, infix, suffix
1745 1744 b'(': (12, None, (b'group', 1, b')'), None, None),
1746 1745 b':': (8, None, (b'ancestors', 8), None, None),
1747 1746 b'&': (5, None, None, (b'and_', 5), None),
1748 1747 b'+': (4, None, None, (b'add', 4), None),
1749 1748 b'-': (4, None, None, (b'sub', 4), None),
1750 1749 b')': (0, None, None, None, None),
1751 1750 b'symbol': (0, b'symbol', None, None, None),
1752 1751 b'end': (0, None, None, None, None),
1753 1752 }
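# For instance, the spec b':D6+8-(2+D4)' from the phabread help parses
# (roughly) as
#   ('sub',
#    ('add', ('ancestors', ('symbol', 'D6')), ('symbol', '8')),
#    ('group', ('add', ('symbol', '2'), ('symbol', 'D4'))))
# because the prefix ':' binds tighter than the infix '+' and '-'.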
1754 1753
1755 1754
1756 1755 def _tokenize(text):
1757 1756 view = memoryview(text) # zero-copy slice
1758 1757 special = b'():+-& '
1759 1758 pos = 0
1760 1759 length = len(text)
1761 1760 while pos < length:
1762 1761 symbol = b''.join(
1763 1762 itertools.takewhile(
1764 1763 lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
1765 1764 )
1766 1765 )
1767 1766 if symbol:
1768 1767 yield (b'symbol', symbol, pos)
1769 1768 pos += len(symbol)
1770 1769 else: # special char, ignore space
1771 1770 if text[pos : pos + 1] != b' ':
1772 1771 yield (text[pos : pos + 1], None, pos)
1773 1772 pos += 1
1774 1773 yield (b'end', None, pos)
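# A quick trace of the tokenizer: _tokenize(b':D6+8') yields
# (b':', None, 0), (b'symbol', b'D6', 1), (b'+', None, 3),
# (b'symbol', b'8', 4), and finally (b'end', None, 5).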
1775 1774
1776 1775
1777 1776 def _parse(text):
1778 1777 tree, pos = parser.parser(_elements).parse(_tokenize(text))
1779 1778 if pos != len(text):
1780 1779 raise error.ParseError(b'invalid token', pos)
1781 1780 return tree
1782 1781
1783 1782
1784 1783 def _parsedrev(symbol):
1785 1784 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
1786 1785 if symbol.startswith(b'D') and symbol[1:].isdigit():
1787 1786 return int(symbol[1:])
1788 1787 if symbol.isdigit():
1789 1788 return int(symbol)
1790 1789
1791 1790
1792 1791 def _prefetchdrevs(tree):
1793 1792 """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
1794 1793 drevs = set()
1795 1794 ancestordrevs = set()
1796 1795 op = tree[0]
1797 1796 if op == b'symbol':
1798 1797 r = _parsedrev(tree[1])
1799 1798 if r:
1800 1799 drevs.add(r)
1801 1800 elif op == b'ancestors':
1802 1801 r, a = _prefetchdrevs(tree[1])
1803 1802 drevs.update(r)
1804 1803 ancestordrevs.update(r)
1805 1804 ancestordrevs.update(a)
1806 1805 else:
1807 1806 for t in tree[1:]:
1808 1807 r, a = _prefetchdrevs(t)
1809 1808 drevs.update(r)
1810 1809 ancestordrevs.update(a)
1811 1810 return drevs, ancestordrevs
1812 1811
1813 1812
1814 1813 def querydrev(ui, spec):
1815 1814 """return a list of "Differential Revision" dicts
1816 1815
1817 1816 spec is a string using a simple query language, see docstring in phabread
1818 1817 for details.
1819 1818
1820 1819 A "Differential Revision dict" looks like:
1821 1820
1822 1821 {
1823 1822 "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
1824 1823 "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
1825 1824 "auxiliary": {
1826 1825 "phabricator:depends-on": [
1827 1826 "PHID-DREV-gbapp366kutjebt7agcd"
1828 1827           ],
1829 1828 "phabricator:projects": [],
1830 1829 },
1831 1830 "branch": "default",
1832 1831 "ccs": [],
1833 1832 "commits": [],
1834 1833 "dateCreated": "1499181406",
1835 1834 "dateModified": "1499182103",
1836 1835 "diffs": [
1837 1836 "3",
1838 1837 "4",
1839 1838 ],
1840 1839 "hashes": [],
1841 1840 "id": "2",
1842 1841 "lineCount": "2",
1843 1842 "phid": "PHID-DREV-672qvysjcczopag46qty",
1844 1843 "properties": {},
1845 1844 "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
1846 1845 "reviewers": [],
1847 1846         "sourcePath": null,
1848 1847 "status": "0",
1849 1848 "statusName": "Needs Review",
1850 1849 "summary": "",
1851 1850 "testPlan": "",
1852 1851 "title": "example",
1853 1852 "uri": "https://phab.example.com/D2",
1854 1853 }
1855 1854 """
1856 1855 # TODO: replace differential.query and differential.querydiffs with
1857 1856 # differential.diff.search because the former (and their output) are
1858 1857 # frozen, and planned to be deprecated and removed.
1859 1858
1860 1859 def fetch(params):
1861 1860 """params -> single drev or None"""
1862 1861 key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
1863 1862 if key in prefetched:
1864 1863 return prefetched[key]
1865 1864 drevs = callconduit(ui, b'differential.query', params)
1866 1865 # Fill prefetched with the result
1867 1866 for drev in drevs:
1868 1867 prefetched[drev[b'phid']] = drev
1869 1868 prefetched[int(drev[b'id'])] = drev
1870 1869 if key not in prefetched:
1871 1870 raise error.Abort(
1872 1871 _(b'cannot get Differential Revision %r') % params
1873 1872 )
1874 1873 return prefetched[key]
1875 1874
1876 1875 def getstack(topdrevids):
1877 1876 """given a top, get a stack from the bottom, [id] -> [id]"""
1878 1877 visited = set()
1879 1878 result = []
1880 1879 queue = [{b'ids': [i]} for i in topdrevids]
1881 1880 while queue:
1882 1881 params = queue.pop()
1883 1882 drev = fetch(params)
1884 1883 if drev[b'id'] in visited:
1885 1884 continue
1886 1885 visited.add(drev[b'id'])
1887 1886 result.append(int(drev[b'id']))
1888 1887 auxiliary = drev.get(b'auxiliary', {})
1889 1888 depends = auxiliary.get(b'phabricator:depends-on', [])
1890 1889 for phid in depends:
1891 1890 queue.append({b'phids': [phid]})
1892 1891 result.reverse()
1893 1892 return smartset.baseset(result)
1894 1893
1895 1894 # Initialize prefetch cache
1896 1895 prefetched = {} # {id or phid: drev}
1897 1896
1898 1897 tree = _parse(spec)
1899 1898 drevs, ancestordrevs = _prefetchdrevs(tree)
1900 1899
1901 1900 # developer config: phabricator.batchsize
1902 1901 batchsize = ui.configint(b'phabricator', b'batchsize')
1903 1902
1904 1903 # Prefetch Differential Revisions in batch
1905 1904 tofetch = set(drevs)
1906 1905 for r in ancestordrevs:
1907 1906 tofetch.update(range(max(1, r - batchsize), r + 1))
1908 1907 if drevs:
1909 1908 fetch({b'ids': list(tofetch)})
1910 1909 validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))
1911 1910
1912 1911 # Walk through the tree, return smartsets
1913 1912 def walk(tree):
1914 1913 op = tree[0]
1915 1914 if op == b'symbol':
1916 1915 drev = _parsedrev(tree[1])
1917 1916 if drev:
1918 1917 return smartset.baseset([drev])
1919 1918 elif tree[1] in _knownstatusnames:
1920 1919 drevs = [
1921 1920 r
1922 1921 for r in validids
1923 1922 if _getstatusname(prefetched[r]) == tree[1]
1924 1923 ]
1925 1924 return smartset.baseset(drevs)
1926 1925 else:
1927 1926 raise error.Abort(_(b'unknown symbol: %s') % tree[1])
1928 1927 elif op in {b'and_', b'add', b'sub'}:
1929 1928 assert len(tree) == 3
1930 1929 return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
1931 1930 elif op == b'group':
1932 1931 return walk(tree[1])
1933 1932 elif op == b'ancestors':
1934 1933 return getstack(walk(tree[1]))
1935 1934 else:
1936 1935 raise error.ProgrammingError(b'illegal tree: %r' % tree)
1937 1936
1938 1937 return [prefetched[r] for r in walk(tree)]
1939 1938
1940 1939
1941 1940 def getdescfromdrev(drev):
1942 1941 """get description (commit message) from "Differential Revision"
1943 1942
1944 1943     This is similar to the differential.getcommitmessage API, but we only care
1945 1944     about a limited set of fields: title, summary, test plan, and URL.
1946 1945 """
1947 1946 title = drev[b'title']
1948 1947 summary = drev[b'summary'].rstrip()
1949 1948 testplan = drev[b'testPlan'].rstrip()
1950 1949 if testplan:
1951 1950 testplan = b'Test Plan:\n%s' % testplan
1952 1951 uri = b'Differential Revision: %s' % drev[b'uri']
1953 1952 return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
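# For the sample drev in the querydrev docstring (title "example", empty
# summary and test plan), this produces:
#   example
#
#   Differential Revision: https://phab.example.com/D2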
1954 1953
1955 1954
1956 1955 def get_amended_desc(drev, ctx, folded):
1957 1956 """similar to ``getdescfromdrev``, but supports a folded series of commits
1958 1957
1959 1958 This is used when determining if an individual commit needs to have its
1960 1959 message amended after posting it for review. The determination is made for
1961 1960 each individual commit, even when they were folded into one review.
1962 1961 """
1963 1962 if not folded:
1964 1963 return getdescfromdrev(drev)
1965 1964
1966 1965 uri = b'Differential Revision: %s' % drev[b'uri']
1967 1966
1968 1967 # Since the commit messages were combined when posting multiple commits
1969 1968 # with --fold, the fields can't be read from Phabricator here, or *all*
1970 1969 # affected local revisions will end up with the same commit message after
1971 1970     # the URI is amended in. Append the DREV line, or update it if it
1972 1971 # exists. At worst, this means commit message or test plan updates on
1973 1972 # Phabricator aren't propagated back to the repository, but that seems
1974 1973 # reasonable for the case where local commits are effectively combined
1975 1974 # in Phabricator.
1976 1975 m = _differentialrevisiondescre.search(ctx.description())
1977 1976 if not m:
1978 1977 return b'\n\n'.join([ctx.description(), uri])
1979 1978
1980 1979 return _differentialrevisiondescre.sub(uri, ctx.description())
1981 1980
1982 1981
1983 1982 def getlocalcommits(diff):
1984 1983 """get the set of local commits from a diff object
1985 1984
1986 1985 See ``getdiffmeta()`` for an example diff object.
1987 1986 """
1988 1987 props = diff.get(b'properties') or {}
1989 1988 commits = props.get(b'local:commits') or {}
1990 1989 if len(commits) > 1:
1991 1990 return {bin(c) for c in commits.keys()}
1992 1991
1993 1992 # Storing the diff metadata predates storing `local:commits`, so continue
1994 1993 # to use that in the --no-fold case.
1995 1994 return {bin(getdiffmeta(diff).get(b'node', b'')) or None}
1996 1995
1997 1996
1998 1997 def getdiffmeta(diff):
1999 1998 """get commit metadata (date, node, user, p1) from a diff object
2000 1999
2001 2000 The metadata could be "hg:meta", sent by phabsend, like:
2002 2001
2003 2002 "properties": {
2004 2003 "hg:meta": {
2005 2004 "branch": "default",
2006 2005 "date": "1499571514 25200",
2007 2006 "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
2008 2007 "user": "Foo Bar <foo@example.com>",
2009 2008 "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
2010 2009 }
2011 2010 }
2012 2011
2013 2012 Or converted from "local:commits", sent by "arc", like:
2014 2013
2015 2014 "properties": {
2016 2015 "local:commits": {
2017 2016 "98c08acae292b2faf60a279b4189beb6cff1414d": {
2018 2017 "author": "Foo Bar",
2019 2018             "authorEmail": "foo@example.com",
2020 2019 "branch": "default",
2021 2020 "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
2022 2021 "local": "1000",
2023 2022 "message": "...",
2024 2023 "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
2025 2024 "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
2026 2025 "summary": "...",
2027 2026 "tag": "",
2028 2027 "time": 1499546314,
2029 2028 }
2030 2029 }
2031 2030 }
2032 2031
2033 2032 Note: metadata extracted from "local:commits" will lose time zone
2034 2033 information.
2035 2034 """
2036 2035 props = diff.get(b'properties') or {}
2037 2036 meta = props.get(b'hg:meta')
2038 2037 if not meta:
2039 2038 if props.get(b'local:commits'):
2040 2039 commit = sorted(props[b'local:commits'].values())[0]
2041 2040 meta = {}
2042 2041 if b'author' in commit and b'authorEmail' in commit:
2043 2042 meta[b'user'] = b'%s <%s>' % (
2044 2043 commit[b'author'],
2045 2044 commit[b'authorEmail'],
2046 2045 )
2047 2046 if b'time' in commit:
2048 2047 meta[b'date'] = b'%d 0' % int(commit[b'time'])
2049 2048 if b'branch' in commit:
2050 2049 meta[b'branch'] = commit[b'branch']
2051 2050 node = commit.get(b'commit', commit.get(b'rev'))
2052 2051 if node:
2053 2052 meta[b'node'] = node
2054 2053 if len(commit.get(b'parents', ())) >= 1:
2055 2054 meta[b'parent'] = commit[b'parents'][0]
2056 2055 else:
2057 2056 meta = {}
2058 2057 if b'date' not in meta and b'dateCreated' in diff:
2059 2058 meta[b'date'] = b'%s 0' % diff[b'dateCreated']
2060 2059 if b'branch' not in meta and diff.get(b'branch'):
2061 2060 meta[b'branch'] = diff[b'branch']
2062 2061 if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
2063 2062 meta[b'parent'] = diff[b'sourceControlBaseRevision']
2064 2063 return meta
2065 2064
2066 2065
2067 2066 def _getdrevs(ui, stack, specs):
2068 2067 """convert user supplied DREVSPECs into "Differential Revision" dicts
2069 2068
2070 2069 See ``hg help phabread`` for how to specify each DREVSPEC.
2071 2070 """
2072 2071 if len(specs) > 0:
2073 2072
2074 2073 def _formatspec(s):
2075 2074 if stack:
2076 2075 s = b':(%s)' % s
2077 2076 return b'(%s)' % s
2078 2077
2079 2078 spec = b'+'.join(pycompat.maplist(_formatspec, specs))
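        # e.g. specs=[b'D4', b'D7'] with stack=True produces the query
        # b'(:(D4))+(:(D7))'; without stack it is b'(D4)+(D7)'.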
2080 2079
2081 2080 drevs = querydrev(ui, spec)
2082 2081 if drevs:
2083 2082 return drevs
2084 2083
2085 2084 raise error.Abort(_(b"empty DREVSPEC set"))
2086 2085
2087 2086
2088 2087 def readpatch(ui, drevs, write):
2089 2088 """generate plain-text patch readable by 'hg import'
2090 2089
2091 2090 write takes a list of (DREV, bytes), where DREV is the differential number
2092 2091 (as bytes, without the "D" prefix) and the bytes are the text of a patch
2093 2092     to be imported. drevs is what "querydrev" returns, i.e. results of
2094 2093 "differential.query".
2095 2094 """
2096 2095 # Prefetch hg:meta property for all diffs
2097 2096 diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs})
2098 2097 diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids})
2099 2098
2100 2099 patches = []
2101 2100
2102 2101 # Generate patch for each drev
2103 2102 for drev in drevs:
2104 2103 ui.note(_(b'reading D%s\n') % drev[b'id'])
2105 2104
2106 2105 diffid = max(int(v) for v in drev[b'diffs'])
2107 2106 body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid})
2108 2107 desc = getdescfromdrev(drev)
2109 2108 header = b'# HG changeset patch\n'
2110 2109
2111 2110 # Try to preserve metadata from hg:meta property. Write hg patch
2112 2111 # headers that can be read by the "import" command. See patchheadermap
2113 2112 # and extract in mercurial/patch.py for supported headers.
2114 2113 meta = getdiffmeta(diffs[b'%d' % diffid])
2115 2114 for k in _metanamemap.keys():
2116 2115 if k in meta:
2117 2116 header += b'# %s %s\n' % (_metanamemap[k], meta[k])
2118 2117
2119 2118 content = b'%s%s\n%s' % (header, desc, body)
2120 2119 patches.append((drev[b'id'], content))
2121 2120
2122 2121 # Write patches to the supplied callback
2123 2122 write(patches)
2124 2123
2125 2124
2126 2125 @vcrcommand(
2127 2126 b'phabread',
2128 2127 [(b'', b'stack', False, _(b'read dependencies'))],
2129 2128 _(b'DREVSPEC... [OPTIONS]'),
2130 2129 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2131 2130 optionalrepo=True,
2132 2131 )
2133 2132 def phabread(ui, repo, *specs, **opts):
2134 2133 """print patches from Phabricator suitable for importing
2135 2134
2136 2135 DREVSPEC could be a Differential Revision identity, like ``D123``, or just
2137 2136 the number ``123``. It could also have common operators like ``+``, ``-``,
2138 2137 ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
2139 2138 select a stack. If multiple DREVSPEC values are given, the result is the
2140 2139 union of each individually evaluated value. No attempt is currently made
2141 2140 to reorder the values to run from parent to child.
2142 2141
2143 2142 ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
2144 2143     could be used to filter patches by status. For performance reasons, they
2145 2144     can only filter the results of non-status selections and cannot be used alone.
2146 2145
2147 2146     For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, excluding
2148 2147 D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
2149 2148 stack up to D9.
2150 2149
2151 2150     If --stack is given, follow dependency information and read all patches.
2152 2151 It is equivalent to the ``:`` operator.
2153 2152 """
2154 2153 opts = pycompat.byteskwargs(opts)
2155 2154 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2156 2155
2157 2156 def _write(patches):
2158 2157 for drev, content in patches:
2159 2158 ui.write(content)
2160 2159
2161 2160 readpatch(ui, drevs, _write)
2162 2161
2163 2162
2164 2163 @vcrcommand(
2165 2164 b'phabimport',
2166 2165 [(b'', b'stack', False, _(b'import dependencies as well'))],
2167 2166 _(b'DREVSPEC... [OPTIONS]'),
2168 2167 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2169 2168 )
2170 2169 def phabimport(ui, repo, *specs, **opts):
2171 2170 """import patches from Phabricator for the specified Differential Revisions
2172 2171
2173 2172 The patches are read and applied starting at the parent of the working
2174 2173 directory.
2175 2174
2176 2175 See ``hg help phabread`` for how to specify DREVSPEC.
2177 2176 """
2178 2177 opts = pycompat.byteskwargs(opts)
2179 2178
2180 2179 # --bypass avoids losing exec and symlink bits when importing on Windows,
2181 2180 # and allows importing with a dirty wdir. It also aborts instead of leaving
2182 2181 # rejects.
2183 2182 opts[b'bypass'] = True
2184 2183
2185 2184 # Mandatory default values, synced with commands.import
2186 2185 opts[b'strip'] = 1
2187 2186 opts[b'prefix'] = b''
2188 2187 # Evolve 9.3.0 assumes this key is present in cmdutil.tryimportone()
2189 2188 opts[b'obsolete'] = False
2190 2189
2191 2190 if ui.configbool(b'phabimport', b'secret'):
2192 2191 opts[b'secret'] = True
2193 2192 if ui.configbool(b'phabimport', b'obsolete'):
2194 2193 opts[b'obsolete'] = True # Handled by evolve wrapping tryimportone()
2195 2194
2196 2195 def _write(patches):
2197 2196 parents = repo[None].parents()
2198 2197
2199 2198 with repo.wlock(), repo.lock(), repo.transaction(b'phabimport'):
2200 2199 for drev, contents in patches:
2201 2200 ui.status(_(b'applying patch from D%s\n') % drev)
2202 2201
2203 2202 with patch.extract(ui, io.BytesIO(contents)) as patchdata:
2204 2203 msg, node, rej = cmdutil.tryimportone(
2205 2204 ui,
2206 2205 repo,
2207 2206 patchdata,
2208 2207 parents,
2209 2208 opts,
2210 2209 [],
2211 2210 None, # Never update wdir to another revision
2212 2211 )
2213 2212
2214 2213 if not node:
2215 2214 raise error.Abort(_(b'D%s: no diffs found') % drev)
2216 2215
2217 2216 ui.note(msg + b'\n')
2218 2217 parents = [repo[node]]
2219 2218
2220 2219 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2221 2220
2222 2221 readpatch(repo.ui, drevs, _write)
2223 2222
2224 2223
2225 2224 @vcrcommand(
2226 2225 b'phabupdate',
2227 2226 [
2228 2227 (b'', b'accept', False, _(b'accept revisions')),
2229 2228 (b'', b'reject', False, _(b'reject revisions')),
2230 2229 (b'', b'request-review', False, _(b'request review on revisions')),
2231 2230 (b'', b'abandon', False, _(b'abandon revisions')),
2232 2231 (b'', b'reclaim', False, _(b'reclaim revisions')),
2233 2232 (b'', b'close', False, _(b'close revisions')),
2234 2233 (b'', b'reopen', False, _(b'reopen revisions')),
2235 2234 (b'', b'plan-changes', False, _(b'plan changes for revisions')),
2236 2235 (b'', b'resign', False, _(b'resign as a reviewer from revisions')),
2237 2236 (b'', b'commandeer', False, _(b'commandeer revisions')),
2238 2237 (b'm', b'comment', b'', _(b'comment on the last revision')),
2239 2238 (b'r', b'rev', b'', _(b'local revision to update'), _(b'REV')),
2240 2239 ],
2241 2240 _(b'[DREVSPEC...| -r REV...] [OPTIONS]'),
2242 2241 helpcategory=command.CATEGORY_IMPORT_EXPORT,
2243 2242 optionalrepo=True,
2244 2243 )
2245 2244 def phabupdate(ui, repo, *specs, **opts):
2246 2245 """update Differential Revision in batch
2247 2246
2248 2247 DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
2249 2248 """
2250 2249 opts = pycompat.byteskwargs(opts)
2251 2250 transactions = [
2252 2251 b'abandon',
2253 2252 b'accept',
2254 2253 b'close',
2255 2254 b'commandeer',
2256 2255 b'plan-changes',
2257 2256 b'reclaim',
2258 2257 b'reject',
2259 2258 b'reopen',
2260 2259 b'request-review',
2261 2260 b'resign',
2262 2261 ]
2263 2262 flags = [n for n in transactions if opts.get(n.replace(b'-', b'_'))]
2264 2263 if len(flags) > 1:
2265 2264 raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))
2266 2265
2267 2266 actions = []
2268 2267 for f in flags:
2269 2268 actions.append({b'type': f, b'value': True})
2270 2269
2271 2270 revs = opts.get(b'rev')
2272 2271 if revs:
2273 2272 if not repo:
2274 2273 raise error.InputError(_(b'--rev requires a repository'))
2275 2274
2276 2275 if specs:
2277 2276 raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))
2278 2277
2279 2278 drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
2280 2279 specs = []
2281 2280 unknown = []
2282 2281 for r, d in drevmap.items():
2283 2282 if d is None:
2284 2283 unknown.append(repo[r])
2285 2284 else:
2286 2285 specs.append(b'D%d' % d)
2287 2286 if unknown:
2288 2287 raise error.InputError(
2289 2288 _(b'selected revisions without a Differential: %s')
2290 2289 % scmutil.nodesummaries(repo, unknown)
2291 2290 )
2292 2291
2293 2292 drevs = _getdrevs(ui, opts.get(b'stack'), specs)
2294 2293 for i, drev in enumerate(drevs):
2295 2294 if i + 1 == len(drevs) and opts.get(b'comment'):
2296 2295 actions.append({b'type': b'comment', b'value': opts[b'comment']})
2297 2296 if actions:
2298 2297 params = {
2299 2298 b'objectIdentifier': drev[b'phid'],
2300 2299 b'transactions': actions,
2301 2300 }
2302 2301 callconduit(ui, b'differential.revision.edit', params)
2303 2302
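A hypothetical illustration of the Conduit payload assembled above for `hg phabupdate --accept -m 'LGTM' D123` (the PHID value is a placeholder; the field names come from the code):

    params = {
        b'objectIdentifier': b'PHID-DREV-xxxx',  # placeholder for drev[b'phid']
        b'transactions': [
            {b'type': b'accept', b'value': True},
            {b'type': b'comment', b'value': b'LGTM'},  # added for the last drev
        ],
    }
    # callconduit(ui, b'differential.revision.edit', params)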
2304 2303
2305 2304 @eh.templatekeyword(b'phabreview', requires={b'ctx'})
2306 2305 def template_review(context, mapping):
2307 2306 """:phabreview: Object describing the review for this changeset.
2308 2307 Has attributes `url` and `id`.
2309 2308 """
2310 2309 ctx = context.resource(mapping, b'ctx')
2311 2310 m = _differentialrevisiondescre.search(ctx.description())
2312 2311 if m:
2313 2312 return templateutil.hybriddict(
2314 2313 {
2315 2314 b'url': m.group('url'),
2316 2315 b'id': b"D%s" % m.group('id'),
2317 2316 }
2318 2317 )
2319 2318 else:
2320 2319 tags = ctx.repo().nodetags(ctx.node())
2321 2320 for t in tags:
2322 2321 if _differentialrevisiontagre.match(t):
2323 2322 url = ctx.repo().ui.config(b'phabricator', b'url')
2324 2323 if not url.endswith(b'/'):
2325 2324 url += b'/'
2326 2325 url += t
2327 2326
2328 2327 return templateutil.hybriddict(
2329 2328 {
2330 2329 b'url': url,
2331 2330 b'id': t,
2332 2331 }
2333 2332 )
2334 2333 return None
2335 2334
2336 2335
2337 2336 @eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
2338 2337 def template_status(context, mapping):
2339 2338 """:phabstatus: String. Status of Phabricator differential."""
2340 2339 ctx = context.resource(mapping, b'ctx')
2341 2340 repo = context.resource(mapping, b'repo')
2342 2341 ui = context.resource(mapping, b'ui')
2343 2342
2344 2343 rev = ctx.rev()
2345 2344 try:
2346 2345 drevid = getdrevmap(repo, [rev])[rev]
2347 2346 except KeyError:
2348 2347 return None
2349 2348 drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
2350 2349 for drev in drevs:
2351 2350 if int(drev[b'id']) == drevid:
2352 2351 return templateutil.hybriddict(
2353 2352 {
2354 2353 b'url': drev[b'uri'],
2355 2354 b'status': drev[b'statusName'],
2356 2355 }
2357 2356 )
2358 2357 return None
2359 2358
2360 2359
2361 2360 @show.showview(b'phabstatus', csettopic=b'work')
2362 2361 def phabstatusshowview(ui, repo, displayer):
2363 2362 """Phabricator differiential status"""
2364 2363 revs = repo.revs('sort(_underway(), topo)')
2365 2364 drevmap = getdrevmap(repo, revs)
2366 2365 unknownrevs, drevids, revsbydrevid = [], set(), {}
2367 2366 for rev, drevid in drevmap.items():
2368 2367 if drevid is not None:
2369 2368 drevids.add(drevid)
2370 2369 revsbydrevid.setdefault(drevid, set()).add(rev)
2371 2370 else:
2372 2371 unknownrevs.append(rev)
2373 2372
2374 2373 drevs = callconduit(ui, b'differential.query', {b'ids': list(drevids)})
2375 2374 drevsbyrev = {}
2376 2375 for drev in drevs:
2377 2376 for rev in revsbydrevid[int(drev[b'id'])]:
2378 2377 drevsbyrev[rev] = drev
2379 2378
2380 2379 def phabstatus(ctx):
2381 2380 drev = drevsbyrev[ctx.rev()]
2382 2381 status = ui.label(
2383 2382 b'%(statusName)s' % drev,
2384 2383 b'phabricator.status.%s' % _getstatusname(drev),
2385 2384 )
2386 2385 ui.write(b"\n%s %s\n" % (drev[b'uri'], status))
2387 2386
2388 2387 revs -= smartset.baseset(unknownrevs)
2389 2388 revdag = graphmod.dagwalker(repo, revs)
2390 2389
2391 2390 ui.setconfig(b'experimental', b'graphshorten', True)
2392 2391 displayer._exthook = phabstatus
2393 2392 nodelen = show.longestshortest(repo, revs)
2394 2393 logcmdutil.displaygraph(
2395 2394 ui,
2396 2395 repo,
2397 2396 revdag,
2398 2397 displayer,
2399 2398 graphmod.asciiedges,
2400 2399 props={b'nodelen': nodelen},
2401 2400 )
@@ -1,549 +1,548
1 1 import collections
2 2 import errno
3 3 import mmap
4 4 import os
5 5 import struct
6 6 import time
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.pycompat import (
10 getattr,
11 10 open,
12 11 )
13 12 from mercurial.node import hex
14 13 from mercurial import (
15 14 policy,
16 15 util,
17 16 vfs as vfsmod,
18 17 )
19 18 from mercurial.utils import hashutil
20 19 from . import shallowutil
21 20
22 21 osutil = policy.importmod('osutil')
23 22
24 23 # The pack version supported by this implementation. This will need to be
25 24 # rev'd whenever the byte format changes. Ex: changing the fanout prefix,
26 25 # changing any of the int sizes, changing the delta algorithm, etc.
27 26 PACKVERSIONSIZE = 1
28 27 INDEXVERSIONSIZE = 2
29 28
30 29 FANOUTSTART = INDEXVERSIONSIZE
31 30
32 31 # Constant that indicates a fanout table entry hasn't been filled in. (This does
33 32 # not get serialized)
34 33 EMPTYFANOUT = -1
35 34
36 35 # The fanout prefix is the number of bytes that can be addressed by the fanout
37 36 # table. Example: a fanout prefix of 1 means we use the first byte of a hash to
38 37 # look in the fanout table (which will be 2^8 entries long).
39 38 SMALLFANOUTPREFIX = 1
40 39 LARGEFANOUTPREFIX = 2
41 40
42 41 # The number of entries in the index at which point we switch to a large fanout.
43 42 # It is chosen to balance the linear scan through a sparse fanout, with the
44 43 # size of the bisect in the actual index.
45 44 # 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
46 45 # bisect) with (8 step fanout scan + 1 step bisect)
47 46 # 5 step bisect = log(2^16 / 8 / 255) # fanout
48 47 # 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
49 48 SMALLFANOUTCUTOFF = 2 ** 16 // 8
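A self-contained sketch of how a fanout prefix maps a node hash to a fanout-table slot, mirroring the `!B`/`!H` struct formats used by `indexparams` below:

    import hashlib
    import struct

    node = hashlib.sha1(b'example').digest()   # a 20-byte node
    small = struct.unpack(b'!B', node[:1])[0]  # prefix 1 -> one of 2^8 slots
    large = struct.unpack(b'!H', node[:2])[0]  # prefix 2 -> one of 2^16 slots
    assert 0 <= small < 256 and 0 <= large < 65536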
50 49
51 50 # The amount of time to wait between checking for new packs. This prevents an
52 51 # exception when data is moved to a new pack after the process has already
53 52 # loaded the pack list.
54 53 REFRESHRATE = 0.1
55 54
56 55
57 56 class _cachebackedpacks:
58 57 def __init__(self, packs, cachesize):
59 58 self._packs = set(packs)
60 59 self._lrucache = util.lrucachedict(cachesize)
61 60 self._lastpack = None
62 61
63 62 # Avoid cold start of the cache by populating the most recent packs
64 63 # in the cache.
65 64 for i in reversed(range(min(cachesize, len(packs)))):
66 65 self._movetofront(packs[i])
67 66
68 67 def _movetofront(self, pack):
69 68 # This effectively makes pack the first entry in the cache.
70 69 self._lrucache[pack] = True
71 70
72 71 def _registerlastpackusage(self):
73 72 if self._lastpack is not None:
74 73 self._movetofront(self._lastpack)
75 74 self._lastpack = None
76 75
77 76 def add(self, pack):
78 77 self._registerlastpackusage()
79 78
80 79 # This method will mostly be called when packs are not in cache.
81 80 # Therefore, add the pack to the cache.
82 81 self._movetofront(pack)
83 82 self._packs.add(pack)
84 83
85 84 def __iter__(self):
86 85 self._registerlastpackusage()
87 86
88 87 # Cache iteration is based on LRU.
89 88 for pack in self._lrucache:
90 89 self._lastpack = pack
91 90 yield pack
92 91
93 92 cachedpacks = {pack for pack in self._lrucache}
94 93 # Yield for paths not in the cache.
95 94 for pack in self._packs - cachedpacks:
96 95 self._lastpack = pack
97 96 yield pack
98 97
99 98 # Data not found in any pack.
100 99 self._lastpack = None
101 100
102 101
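The iteration order above (cached packs first, then the cold remainder) can be sketched with a plain stand-in for `util.lrucachedict`:

    from collections import OrderedDict

    lru = OrderedDict()             # stand-in for util.lrucachedict
    allpacks = {'p1', 'p2', 'p3'}
    for p in ('p2', 'p3'):          # mark p2 then p3 as recently used
        lru[p] = True
        lru.move_to_end(p, last=False)

    order = list(lru) + sorted(allpacks - set(lru))
    assert order == ['p3', 'p2', 'p1']  # cache hits are tried before cold packs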
103 102 class basepackstore:
104 103 # Default cache size limit for the pack files.
105 104 DEFAULTCACHESIZE = 100
106 105
107 106 def __init__(self, ui, path):
108 107 self.ui = ui
109 108 self.path = path
110 109
111 110 # lastrefresh is 0 so we'll immediately check for new packs on the first
112 111 # failure.
113 112 self.lastrefresh = 0
114 113
115 114 packs = []
116 115 for filepath, __, __ in self._getavailablepackfilessorted():
117 116 try:
118 117 pack = self.getpack(filepath)
119 118 except Exception as ex:
120 119 # An exception may be thrown if the pack file is corrupted
121 120 # somehow. Log a warning but keep going in this case, just
122 121 # skipping this pack file.
123 122 #
124 123 # If this is an ENOENT error then don't even bother logging.
125 124 # Someone could have removed the file since we retrieved the
126 125 # list of paths.
127 126 if getattr(ex, 'errno', None) != errno.ENOENT:
128 127 ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
129 128 continue
130 129 packs.append(pack)
131 130
132 131 self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)
133 132
134 133 def _getavailablepackfiles(self):
135 134 """For each pack file (a index/data file combo), yields:
136 135 (full path without extension, mtime, size)
137 136
138 137 mtime will be the mtime of the index/data file (whichever is newer)
139 138 size is the combined size of index/data file
140 139 """
141 140 indexsuffixlen = len(self.INDEXSUFFIX)
142 141 packsuffixlen = len(self.PACKSUFFIX)
143 142
144 143 ids = set()
145 144 sizes = collections.defaultdict(lambda: 0)
146 145 mtimes = collections.defaultdict(lambda: [])
147 146 try:
148 147 for filename, type, stat in osutil.listdir(self.path, stat=True):
149 148 id = None
150 149 if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
151 150 id = filename[:-indexsuffixlen]
152 151 elif filename[-packsuffixlen:] == self.PACKSUFFIX:
153 152 id = filename[:-packsuffixlen]
154 153
155 154 # Since we expect to have two files corresponding to each ID
156 155 # (the index file and the pack file), we can yield once we see
157 156 # it twice.
158 157 if id:
159 158 sizes[id] += stat.st_size # Sum both files' sizes together
160 159 mtimes[id].append(stat.st_mtime)
161 160 if id in ids:
162 161 yield (
163 162 os.path.join(self.path, id),
164 163 max(mtimes[id]),
165 164 sizes[id],
166 165 )
167 166 else:
168 167 ids.add(id)
169 168 except FileNotFoundError:
170 169 pass
171 170
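The two-files-per-id pairing rule above, in isolation (the suffix names here are hypothetical; concrete stores define their own INDEXSUFFIX/PACKSUFFIX):

    listing = [(b'abc.idx', 10), (b'abc.pack', 90), (b'orphan.idx', 7)]
    seen, complete = set(), []
    for name, size in listing:
        for suffix in (b'.idx', b'.pack'):
            if name.endswith(suffix):
                pid = name[: -len(suffix)]
                if pid in seen:
                    complete.append(pid)  # both halves present -> yield
                else:
                    seen.add(pid)
    assert complete == [b'abc']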
172 171 def _getavailablepackfilessorted(self):
173 172 """Like `_getavailablepackfiles`, but also sorts the files by mtime,
174 173 yielding newest files first.
175 174
176 175 This is desirable, since it is more likely newer packfiles have more
177 176 desirable data.
178 177 """
179 178 files = []
180 179 for path, mtime, size in self._getavailablepackfiles():
181 180 files.append((mtime, size, path))
182 181 files = sorted(files, reverse=True)
183 182 for mtime, size, path in files:
184 183 yield path, mtime, size
185 184
186 185 def gettotalsizeandcount(self):
187 186 """Returns the total disk size (in bytes) of all the pack files in
188 187 this store, and the count of pack files.
189 188
190 189 (This might be smaller than the total size of the ``self.path``
191 190 directory, since this only considers fully-written pack files, and not
192 191 temporary files or other detritus on the directory.)
193 192 """
194 193 totalsize = 0
195 194 count = 0
196 195 for __, __, size in self._getavailablepackfiles():
197 196 totalsize += size
198 197 count += 1
199 198 return totalsize, count
200 199
201 200 def getmetrics(self):
202 201 """Returns metrics on the state of this store."""
203 202 size, count = self.gettotalsizeandcount()
204 203 return {
205 204 b'numpacks': count,
206 205 b'totalpacksize': size,
207 206 }
208 207
209 208 def getpack(self, path):
210 209 raise NotImplementedError()
211 210
212 211 def getmissing(self, keys):
213 212 missing = keys
214 213 for pack in self.packs:
215 214 missing = pack.getmissing(missing)
216 215
217 216 # Ensures better performance of the cache by keeping the most
218 217 # recently accessed pack at the beginning in subsequent iterations.
219 218 if not missing:
220 219 return missing
221 220
222 221 if missing:
223 222 for pack in self.refresh():
224 223 missing = pack.getmissing(missing)
225 224
226 225 return missing
227 226
228 227 def markledger(self, ledger, options=None):
229 228 for pack in self.packs:
230 229 pack.markledger(ledger)
231 230
232 231 def markforrefresh(self):
233 232 """Tells the store that there may be new pack files, so the next time it
234 233 has a lookup miss it should check for new files."""
235 234 self.lastrefresh = 0
236 235
237 236 def refresh(self):
238 237 """Checks for any new packs on disk, adds them to the main pack list,
239 238 and returns a list of just the new packs."""
240 239 now = time.time()
241 240
242 241 # If we experience a lot of misses (like in the case of getmissing() on
243 242 # new objects), let's only actually check disk for new stuff every once
244 243 # in a while. Generally this code path should only ever matter when a
245 244 # repack is going on in the background, and it should be rare for that
246 245 # to happen twice in quick succession.
247 246 newpacks = []
248 247 if now > self.lastrefresh + REFRESHRATE:
249 248 self.lastrefresh = now
250 249 previous = {p.path for p in self.packs}
251 250 for filepath, __, __ in self._getavailablepackfilessorted():
252 251 if filepath not in previous:
253 252 newpack = self.getpack(filepath)
254 253 newpacks.append(newpack)
255 254 self.packs.add(newpack)
256 255
257 256 return newpacks
258 257
259 258
260 259 class versionmixin:
261 260 # Mix-in for classes with multiple supported versions
262 261 VERSION = None
263 262 SUPPORTED_VERSIONS = [2]
264 263
265 264 def _checkversion(self, version):
266 265 if version in self.SUPPORTED_VERSIONS:
267 266 if self.VERSION is None:
268 267 # only affect this instance
269 268 self.VERSION = version
270 269 elif self.VERSION != version:
271 270 raise RuntimeError(b'inconsistent version: %d' % version)
272 271 else:
273 272 raise RuntimeError(b'unsupported version: %d' % version)
274 273
275 274
276 275 class basepack(versionmixin):
277 276 # The maximum amount we should read via mmap before remapping so the old
278 277 # pages can be released (100MB)
279 278 MAXPAGEDIN = 100 * 1024 ** 2
280 279
281 280 SUPPORTED_VERSIONS = [2]
282 281
283 282 def __init__(self, path):
284 283 self.path = path
285 284 self.packpath = path + self.PACKSUFFIX
286 285 self.indexpath = path + self.INDEXSUFFIX
287 286
288 287 self.indexsize = os.stat(self.indexpath).st_size
289 288 self.datasize = os.stat(self.packpath).st_size
290 289
291 290 self._index = None
292 291 self._data = None
293 292 self.freememory() # initialize the mmap
294 293
295 294 version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
296 295 self._checkversion(version)
297 296
298 297 version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
299 298 self._checkversion(version)
300 299
301 300 if 0b10000000 & config:
302 301 self.params = indexparams(LARGEFANOUTPREFIX, version)
303 302 else:
304 303 self.params = indexparams(SMALLFANOUTPREFIX, version)
305 304
306 305 @util.propertycache
307 306 def _fanouttable(self):
308 307 params = self.params
309 308 rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
310 309 fanouttable = []
311 310 for i in range(0, params.fanoutcount):
312 311 loc = i * 4
313 312 fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
314 313 fanouttable.append(fanoutentry)
315 314 return fanouttable
316 315
317 316 @util.propertycache
318 317 def _indexend(self):
319 318 nodecount = struct.unpack_from(
320 319 b'!Q', self._index, self.params.indexstart - 8
321 320 )[0]
322 321 return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH
323 322
324 323 def freememory(self):
325 324 """Unmap and remap the memory to free it up after known expensive
326 325 operations. Return True if self._data and self._index were reloaded.
327 326 """
328 327 if self._index:
329 328 if self._pagedin < self.MAXPAGEDIN:
330 329 return False
331 330
332 331 self._index.close()
333 332 self._data.close()
334 333
335 334 # TODO: use an opener/vfs to access these paths
336 335 with open(self.indexpath, b'rb') as indexfp:
337 336 # memory-map the file, size 0 means whole file
338 337 self._index = mmap.mmap(
339 338 indexfp.fileno(), 0, access=mmap.ACCESS_READ
340 339 )
341 340 with open(self.packpath, b'rb') as datafp:
342 341 self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)
343 342
344 343 self._pagedin = 0
345 344 return True
346 345
347 346 def getmissing(self, keys):
348 347 raise NotImplementedError()
349 348
350 349 def markledger(self, ledger, options=None):
351 350 raise NotImplementedError()
352 351
353 352 def cleanup(self, ledger):
354 353 raise NotImplementedError()
355 354
356 355 def __iter__(self):
357 356 raise NotImplementedError()
358 357
359 358 def iterentries(self):
360 359 raise NotImplementedError()
361 360
362 361
363 362 class mutablebasepack(versionmixin):
364 363 def __init__(self, ui, packdir, version=2):
365 364 self._checkversion(version)
366 365 # TODO(augie): make this configurable
367 366 self._compressor = b'GZ'
368 367 opener = vfsmod.vfs(packdir)
369 368 opener.createmode = 0o444
370 369 self.opener = opener
371 370
372 371 self.entries = {}
373 372
374 373 shallowutil.mkstickygroupdir(ui, packdir)
375 374 self.packfp, self.packpath = opener.mkstemp(
376 375 suffix=self.PACKSUFFIX + b'-tmp'
377 376 )
378 377 self.idxfp, self.idxpath = opener.mkstemp(
379 378 suffix=self.INDEXSUFFIX + b'-tmp'
380 379 )
381 380 self.packfp = os.fdopen(self.packfp, 'wb+')
382 381 self.idxfp = os.fdopen(self.idxfp, 'wb+')
383 382 self.sha = hashutil.sha1()
384 383 self._closed = False
385 384
386 385 # The opener provides no way of doing permission fixup on files created
387 386 # via mkstemp, so we must fix it ourselves. We can probably fix this
388 387 # upstream in vfs.mkstemp so we don't need to use the private method.
389 388 opener._fixfilemode(opener.join(self.packpath))
390 389 opener._fixfilemode(opener.join(self.idxpath))
391 390
392 391 # Write header
393 392 # TODO: make it extensible (ex: allow specifying compression algorithm,
394 393 # a flexible key/value header, delta algorithm, fanout size, etc)
395 394 versionbuf = struct.pack(b'!B', self.VERSION) # unsigned 1 byte int
396 395 self.writeraw(versionbuf)
397 396
398 397 def __enter__(self):
399 398 return self
400 399
401 400 def __exit__(self, exc_type, exc_value, traceback):
402 401 if exc_type is None:
403 402 self.close()
404 403 else:
405 404 self.abort()
406 405
407 406 def abort(self):
408 407 # Unclean exit
409 408 self._cleantemppacks()
410 409
411 410 def writeraw(self, data):
412 411 self.packfp.write(data)
413 412 self.sha.update(data)
414 413
415 414 def close(self, ledger=None):
416 415 if self._closed:
417 416 return
418 417
419 418 try:
420 419 sha = hex(self.sha.digest())
421 420 self.packfp.close()
422 421 self.writeindex()
423 422
424 423 if len(self.entries) == 0:
425 424 # Empty pack
426 425 self._cleantemppacks()
427 426 self._closed = True
428 427 return None
429 428
430 429 self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
431 430 try:
432 431 self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
433 432 except Exception as ex:
434 433 try:
435 434 self.opener.unlink(sha + self.PACKSUFFIX)
436 435 except Exception:
437 436 pass
438 437 # Throw exception 'ex' explicitly since a normal 'raise' would
439 438 # potentially throw an exception from the unlink cleanup.
440 439 raise ex
441 440 except Exception:
442 441 # Clean up temp packs in all exception cases
443 442 self._cleantemppacks()
444 443 raise
445 444
446 445 self._closed = True
447 446 result = self.opener.join(sha)
448 447 if ledger:
449 448 ledger.addcreated(result)
450 449 return result
451 450
452 451 def _cleantemppacks(self):
453 452 try:
454 453 self.opener.unlink(self.packpath)
455 454 except Exception:
456 455 pass
457 456 try:
458 457 self.opener.unlink(self.idxpath)
459 458 except Exception:
460 459 pass
461 460
462 461 def writeindex(self):
463 462 largefanout = len(self.entries) > SMALLFANOUTCUTOFF
464 463 if largefanout:
465 464 params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
466 465 else:
467 466 params = indexparams(SMALLFANOUTPREFIX, self.VERSION)
468 467
469 468 fanouttable = [EMPTYFANOUT] * params.fanoutcount
470 469
471 470 # Precompute the location of each entry
472 471 locations = {}
473 472 count = 0
474 473 for node in sorted(self.entries):
475 474 location = count * self.INDEXENTRYLENGTH
476 475 locations[node] = location
477 476 count += 1
478 477
479 478 # Must use [0] on the unpack result since it's always a tuple.
480 479 fanoutkey = struct.unpack(
481 480 params.fanoutstruct, node[: params.fanoutprefix]
482 481 )[0]
483 482 if fanouttable[fanoutkey] == EMPTYFANOUT:
484 483 fanouttable[fanoutkey] = location
485 484
486 485 rawfanouttable = b''
487 486 last = 0
488 487 for offset in fanouttable:
489 488 offset = offset if offset != EMPTYFANOUT else last
490 489 last = offset
491 490 rawfanouttable += struct.pack(b'!I', offset)
492 491
493 492 rawentrieslength = struct.pack(b'!Q', len(self.entries))
494 493
495 494 # The index offset is its location in the file: right after the 2 byte
496 495 # header and the fanout table.
497 496 rawindex = self.createindex(locations, 2 + len(rawfanouttable))
498 497
499 498 self._writeheader(params)
500 499 self.idxfp.write(rawfanouttable)
501 500 self.idxfp.write(rawentrieslength)
502 501 self.idxfp.write(rawindex)
503 502 self.idxfp.close()
504 503
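The empty-slot backfill above, rerun standalone: an `EMPTYFANOUT` slot inherits the previous offset so lookups still land on a contiguous range.

    EMPTYFANOUT = -1
    fanouttable = [0, EMPTYFANOUT, 40, EMPTYFANOUT]

    filled, last = [], 0
    for offset in fanouttable:
        offset = offset if offset != EMPTYFANOUT else last
        last = offset
        filled.append(offset)
    assert filled == [0, 0, 40, 40]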
505 504 def createindex(self, nodelocations):
506 505 raise NotImplementedError()
507 506
508 507 def _writeheader(self, indexparams):
509 508 # Index header
510 509 # <version: 1 byte>
511 510 # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
512 511 # <unused: 7 bit> # future use (compression, delta format, etc)
513 512 config = 0
514 513 if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
515 514 config = 0b10000000
516 515 self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
517 516
518 517
519 518 class indexparams:
520 519 __slots__ = (
521 520 'fanoutprefix',
522 521 'fanoutstruct',
523 522 'fanoutcount',
524 523 'fanoutsize',
525 524 'indexstart',
526 525 )
527 526
528 527 def __init__(self, prefixsize, version):
529 528 self.fanoutprefix = prefixsize
530 529
531 530 # The struct pack format for fanout table location (i.e. the format that
532 531 # converts the node prefix into an integer location in the fanout
533 532 # table).
534 533 if prefixsize == SMALLFANOUTPREFIX:
535 534 self.fanoutstruct = b'!B'
536 535 elif prefixsize == LARGEFANOUTPREFIX:
537 536 self.fanoutstruct = b'!H'
538 537 else:
539 538 raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)
540 539
541 540 # The number of fanout table entries
542 541 self.fanoutcount = 2 ** (prefixsize * 8)
543 542
544 543 # The total bytes used by the fanout table
545 544 self.fanoutsize = self.fanoutcount * 4
546 545
547 546 self.indexstart = FANOUTSTART + self.fanoutsize
548 547 # Skip the index length
549 548 self.indexstart += 8
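Working the arithmetic above through both prefix sizes (FANOUTSTART is 2, per the constants near the top of this file):

    for prefixsize in (1, 2):
        fanoutcount = 2 ** (prefixsize * 8)
        fanoutsize = fanoutcount * 4
        indexstart = 2 + fanoutsize + 8  # FANOUTSTART + fanout + 8-byte count
        print(prefixsize, fanoutcount, fanoutsize, indexstart)
    # 1   256   1024 1034
    # 2 65536 262144 262154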
@@ -1,396 +1,395
1 1 import threading
2 2
3 3 from mercurial.node import (
4 4 hex,
5 5 sha1nodeconstants,
6 6 )
7 from mercurial.pycompat import getattr
8 7 from mercurial import (
9 8 mdiff,
10 9 revlog,
11 10 )
12 11 from . import (
13 12 basestore,
14 13 constants,
15 14 shallowutil,
16 15 )
17 16
18 17
19 18 class ChainIndicies:
20 19 """A static class for easy reference to the delta chain indicies."""
21 20
22 21 # The filename of this revision delta
23 22 NAME = 0
24 23 # The mercurial file node for this revision delta
25 24 NODE = 1
26 25 # The filename of the delta base's revision. This is useful when the delta
27 26 # is between different files (e.g. after a move or copy, we can delta
28 27 # against the original file content).
29 28 BASENAME = 2
30 29 # The mercurial file node for the delta base revision. This is the nullid if
31 30 # this delta is a full text.
32 31 BASENODE = 3
33 32 # The actual delta or full text data.
34 33 DATA = 4
35 34
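A hypothetical chain entry indexed by these constants; a nullid in the BASENODE slot marks a full text:

    NULLID = b'\0' * 20                    # sha1nodeconstants.nullid
    entry = (b'foo.py', b'\x01' * 20,      # NAME, NODE
             b'foo.py', NULLID,            # BASENAME, BASENODE
             b'full text of foo.py')       # DATA
    assert entry[3] == NULLID              # ChainIndicies.BASENODE -> fulltext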
36 35
37 36 class unioncontentstore(basestore.baseunionstore):
38 37 def __init__(self, *args, **kwargs):
39 38 super(unioncontentstore, self).__init__(*args, **kwargs)
40 39
41 40 self.stores = args
42 41 self.writestore = kwargs.get('writestore')
43 42
44 43 # If allowincomplete==True then the union store can return partial
45 44 # delta chains, otherwise it will throw a KeyError if a full
46 45 # deltachain can't be found.
47 46 self.allowincomplete = kwargs.get('allowincomplete', False)
48 47
49 48 def get(self, name, node):
50 49 """Fetches the full text revision contents of the given name+node pair.
51 50 If the full text doesn't exist, throws a KeyError.
52 51
53 52 Under the hood, this uses getdeltachain() across all the stores to build
54 53 up a full chain to produce the full text.
55 54 """
56 55 chain = self.getdeltachain(name, node)
57 56
58 57 if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
59 58 # If we didn't receive a full chain, throw
60 59 raise KeyError((name, hex(node)))
61 60
62 61 # The last entry in the chain is a full text, so we start our delta
63 62 # applies with that.
64 63 fulltext = chain.pop()[ChainIndicies.DATA]
65 64
66 65 text = fulltext
67 66 while chain:
68 67 delta = chain.pop()[ChainIndicies.DATA]
69 68 text = mdiff.patches(text, [delta])
70 69
71 70 return text
72 71
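The pop-and-apply order in `get()` with a toy delta representation (callables stand in for real bdiff deltas; the actual code uses `mdiff.patches`):

    chain = [lambda t: t + b'!',   # newest delta (front of the chain)
             lambda t: t.upper()]  # older delta
    text = b'base'                 # the popped full text
    while chain:
        text = chain.pop()(text)   # oldest remaining delta applies first
    assert text == b'BASE!'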
73 72 @basestore.baseunionstore.retriable
74 73 def getdelta(self, name, node):
75 74 """Return the single delta entry for the given name/node pair."""
76 75 for store in self.stores:
77 76 try:
78 77 return store.getdelta(name, node)
79 78 except KeyError:
80 79 pass
81 80
82 81 raise KeyError((name, hex(node)))
83 82
84 83 def getdeltachain(self, name, node):
85 84 """Returns the deltachain for the given name/node pair.
86 85
87 86 Returns an ordered list of:
88 87
89 88 [(name, node, deltabasename, deltabasenode, deltacontent),...]
90 89
91 90 where the chain is terminated by a full text entry with a nullid
92 91 deltabasenode.
93 92 """
94 93 chain = self._getpartialchain(name, node)
95 94 while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
96 95 x, x, deltabasename, deltabasenode, x = chain[-1]
97 96 try:
98 97 morechain = self._getpartialchain(deltabasename, deltabasenode)
99 98 chain.extend(morechain)
100 99 except KeyError:
101 100 # If we allow incomplete chains, don't throw.
102 101 if not self.allowincomplete:
103 102 raise
104 103 break
105 104
106 105 return chain
107 106
108 107 @basestore.baseunionstore.retriable
109 108 def getmeta(self, name, node):
110 109 """Returns the metadata dict for given node."""
111 110 for store in self.stores:
112 111 try:
113 112 return store.getmeta(name, node)
114 113 except KeyError:
115 114 pass
116 115 raise KeyError((name, hex(node)))
117 116
118 117 def getmetrics(self):
119 118 metrics = [s.getmetrics() for s in self.stores]
120 119 return shallowutil.sumdicts(*metrics)
121 120
122 121 @basestore.baseunionstore.retriable
123 122 def _getpartialchain(self, name, node):
124 123 """Returns a partial delta chain for the given name/node pair.
125 124
126 125 A partial chain is a chain that may not be terminated in a full-text.
127 126 """
128 127 for store in self.stores:
129 128 try:
130 129 return store.getdeltachain(name, node)
131 130 except KeyError:
132 131 pass
133 132
134 133 raise KeyError((name, hex(node)))
135 134
136 135 def add(self, name, node, data):
137 136 raise RuntimeError(
138 137 b"cannot add content only to remotefilelog contentstore"
139 138 )
140 139
141 140 def getmissing(self, keys):
142 141 missing = keys
143 142 for store in self.stores:
144 143 if missing:
145 144 missing = store.getmissing(missing)
146 145 return missing
147 146
148 147 def addremotefilelognode(self, name, node, data):
149 148 if self.writestore:
150 149 self.writestore.addremotefilelognode(name, node, data)
151 150 else:
152 151 raise RuntimeError(b"no writable store configured")
153 152
154 153 def markledger(self, ledger, options=None):
155 154 for store in self.stores:
156 155 store.markledger(ledger, options)
157 156
158 157
159 158 class remotefilelogcontentstore(basestore.basestore):
160 159 def __init__(self, *args, **kwargs):
161 160 super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
162 161 self._threaddata = threading.local()
163 162
164 163 def get(self, name, node):
165 164 # return raw revision text
166 165 data = self._getdata(name, node)
167 166
168 167 offset, size, flags = shallowutil.parsesizeflags(data)
169 168 content = data[offset : offset + size]
170 169
171 170 ancestormap = shallowutil.ancestormap(data)
172 171 p1, p2, linknode, copyfrom = ancestormap[node]
173 172 copyrev = None
174 173 if copyfrom:
175 174 copyrev = hex(p1)
176 175
177 176 self._updatemetacache(node, size, flags)
178 177
179 178 # lfs tracks renames in its own metadata, remove hg copy metadata,
180 179 # because copy metadata will be re-added by lfs flag processor.
181 180 if flags & revlog.REVIDX_EXTSTORED:
182 181 copyrev = copyfrom = None
183 182 revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
184 183 return revision
185 184
186 185 def getdelta(self, name, node):
187 186 # Since remotefilelog content stores only contain full texts, just
188 187 # return that.
189 188 revision = self.get(name, node)
190 189 return (
191 190 revision,
192 191 name,
193 192 sha1nodeconstants.nullid,
194 193 self.getmeta(name, node),
195 194 )
196 195
197 196 def getdeltachain(self, name, node):
198 197 # Since remotefilelog content stores just contain full texts, we return
199 198 # a fake delta chain that just consists of a single full text revision.
200 199 # The nullid in the deltabasenode slot indicates that the revision is a
201 200 # fulltext.
202 201 revision = self.get(name, node)
203 202 return [(name, node, None, sha1nodeconstants.nullid, revision)]
204 203
205 204 def getmeta(self, name, node):
206 205 self._sanitizemetacache()
207 206 if node != self._threaddata.metacache[0]:
208 207 data = self._getdata(name, node)
209 208 offset, size, flags = shallowutil.parsesizeflags(data)
210 209 self._updatemetacache(node, size, flags)
211 210 return self._threaddata.metacache[1]
212 211
213 212 def add(self, name, node, data):
214 213 raise RuntimeError(
215 214 b"cannot add content only to remotefilelog contentstore"
216 215 )
217 216
218 217 def _sanitizemetacache(self):
219 218 metacache = getattr(self._threaddata, 'metacache', None)
220 219 if metacache is None:
221 220 self._threaddata.metacache = (None, None) # (node, meta)
222 221
223 222 def _updatemetacache(self, node, size, flags):
224 223 self._sanitizemetacache()
225 224 if node == self._threaddata.metacache[0]:
226 225 return
227 226 meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
228 227 self._threaddata.metacache = (node, meta)
229 228
230 229
231 230 class remotecontentstore:
232 231 def __init__(self, ui, fileservice, shared):
233 232 self._fileservice = fileservice
234 233 # type(shared) is usually remotefilelogcontentstore
235 234 self._shared = shared
236 235
237 236 def get(self, name, node):
238 237 self._fileservice.prefetch(
239 238 [(name, hex(node))], force=True, fetchdata=True
240 239 )
241 240 return self._shared.get(name, node)
242 241
243 242 def getdelta(self, name, node):
244 243 revision = self.get(name, node)
245 244 return (
246 245 revision,
247 246 name,
248 247 sha1nodeconstants.nullid,
249 248 self._shared.getmeta(name, node),
250 249 )
251 250
252 251 def getdeltachain(self, name, node):
253 252 # Since our remote content stores just contain full texts, we return a
254 253 # fake delta chain that just consists of a single full text revision.
255 254 # The nullid in the deltabasenode slot indicates that the revision is a
256 255 # fulltext.
257 256 revision = self.get(name, node)
258 257 return [(name, node, None, sha1nodeconstants.nullid, revision)]
259 258
260 259 def getmeta(self, name, node):
261 260 self._fileservice.prefetch(
262 261 [(name, hex(node))], force=True, fetchdata=True
263 262 )
264 263 return self._shared.getmeta(name, node)
265 264
266 265 def add(self, name, node, data):
267 266 raise RuntimeError(b"cannot add to a remote store")
268 267
269 268 def getmissing(self, keys):
270 269 return keys
271 270
272 271 def markledger(self, ledger, options=None):
273 272 pass
274 273
275 274
276 275 class manifestrevlogstore:
277 276 def __init__(self, repo):
278 277 self._store = repo.store
279 278 self._svfs = repo.svfs
280 279 self._revlogs = dict()
281 280 self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
282 281 self._repackstartlinkrev = 0
283 282
284 283 def get(self, name, node):
285 284 return self._revlog(name).rawdata(node)
286 285
287 286 def getdelta(self, name, node):
288 287 revision = self.get(name, node)
289 288 return revision, name, self._cl.nullid, self.getmeta(name, node)
290 289
291 290 def getdeltachain(self, name, node):
292 291 revision = self.get(name, node)
293 292 return [(name, node, None, self._cl.nullid, revision)]
294 293
295 294 def getmeta(self, name, node):
296 295 rl = self._revlog(name)
297 296 rev = rl.rev(node)
298 297 return {
299 298 constants.METAKEYFLAG: rl.flags(rev),
300 299 constants.METAKEYSIZE: rl.rawsize(rev),
301 300 }
302 301
303 302 def getancestors(self, name, node, known=None):
304 303 if known is None:
305 304 known = set()
306 305 if node in known:
307 306 return []
308 307
309 308 rl = self._revlog(name)
310 309 ancestors = {}
311 310 missing = {node}
312 311 for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
313 312 ancnode = rl.node(ancrev)
314 313 missing.discard(ancnode)
315 314
316 315 p1, p2 = rl.parents(ancnode)
317 316 if p1 != self._cl.nullid and p1 not in known:
318 317 missing.add(p1)
319 318 if p2 != self._cl.nullid and p2 not in known:
320 319 missing.add(p2)
321 320
322 321 linknode = self._cl.node(rl.linkrev(ancrev))
323 322 ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
324 323 if not missing:
325 324 break
326 325 return ancestors
327 326
328 327 def getnodeinfo(self, name, node):
329 328 cl = self._cl
330 329 rl = self._revlog(name)
331 330 parents = rl.parents(node)
332 331 linkrev = rl.linkrev(rl.rev(node))
333 332 return (parents[0], parents[1], cl.node(linkrev), None)
334 333
335 334 def add(self, *args):
336 335 raise RuntimeError(b"cannot add to a revlog store")
337 336
338 337 def _revlog(self, name):
339 338 rl = self._revlogs.get(name)
340 339 if rl is None:
341 340 revlogname = b'00manifesttree'
342 341 if name != b'':
343 342 revlogname = b'meta/%s/00manifest' % name
344 343 rl = revlog.revlog(self._svfs, radix=revlogname)
345 344 self._revlogs[name] = rl
346 345 return rl
347 346
348 347 def getmissing(self, keys):
349 348 missing = []
350 349 for name, node in keys:
351 350 mfrevlog = self._revlog(name)
352 351 if node not in mfrevlog.nodemap:
353 352 missing.append((name, node))
354 353
355 354 return missing
356 355
357 356 def setrepacklinkrevrange(self, startrev, endrev):
358 357 self._repackstartlinkrev = startrev
359 358 self._repackendlinkrev = endrev
360 359
361 360 def markledger(self, ledger, options=None):
362 361 if options and options.get(constants.OPTION_PACKSONLY):
363 362 return
364 363 treename = b''
365 364 rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
366 365 startlinkrev = self._repackstartlinkrev
367 366 endlinkrev = self._repackendlinkrev
368 367 for rev in range(len(rl) - 1, -1, -1):
369 368 linkrev = rl.linkrev(rev)
370 369 if linkrev < startlinkrev:
371 370 break
372 371 if linkrev > endlinkrev:
373 372 continue
374 373 node = rl.node(rev)
375 374 ledger.markdataentry(self, treename, node)
376 375 ledger.markhistoryentry(self, treename, node)
377 376
378 377 for t, path, size in self._store.data_entries():
379 378 if path[:5] != b'meta/' or path[-2:] != b'.i':
380 379 continue
381 380
382 381 treename = path[5 : -len(b'/00manifest')]
383 382
384 383 rl = revlog.revlog(self._svfs, indexfile=path[:-2])
385 384 for rev in range(len(rl) - 1, -1, -1):
386 385 linkrev = rl.linkrev(rev)
387 386 if linkrev < startlinkrev:
388 387 break
389 388 if linkrev > endlinkrev:
390 389 continue
391 390 node = rl.node(rev)
392 391 ledger.markdataentry(self, treename, node)
393 392 ledger.markhistoryentry(self, treename, node)
394 393
395 394 def cleanup(self, ledger):
396 395 pass