pycompat: drop usage of hasattr/getattr/setattr/delattr proxy...
marmoute
r51822:18c8c189 default

The requested changes are too big and content was truncated.
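The four hunks below all make the same mechanical change: they stop importing the attribute-builtin proxies from mercurial.pycompat, so every call site binds to Python's own hasattr/getattr/setattr/delattr. For readers unfamiliar with the proxies, here is a minimal sketch of the kind of wrapper pycompat provided; this is an assumed shape for illustration, not Mercurial's exact code. The idea is that the wrappers guarded the attribute builtins against bytes attribute names left over from the Python 2 port:

import builtins
import functools

def _wrapattrfunc(f):
    # Guard an attribute builtin: attribute names must be native str,
    # not the bytes literals a codebase ported from Python 2 tends to
    # pass around.
    @functools.wraps(f)
    def wrapped(obj, name, *args):
        if isinstance(name, bytes):
            raise TypeError(
                '%s: attribute name must be str, not bytes' % f.__name__
            )
        return f(obj, name, *args)
    return wrapped

getattr = _wrapattrfunc(builtins.getattr)
setattr = _wrapattrfunc(builtins.setattr)
hasattr = _wrapattrfunc(builtins.hasattr)
delattr = _wrapattrfunc(builtins.delattr)

With the Python 3 port long finished, the remaining call sites already pass native str names, so the proxies are pure overhead and the plain builtins are drop-in replacements, which is exactly what each hunk below does.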

@@ -1,332 +1,331 @@
 # cvs.py: CVS conversion code inspired by hg-cvs-import and git-cvsimport
 #
 # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import errno
 import os
 import re
 import socket

 from mercurial.i18n import _
 from mercurial.pycompat import (
-    getattr,
     open,
 )
 from mercurial import (
     encoding,
     error,
     util,
 )
 from mercurial.utils import (
     dateutil,
     procutil,
 )

 from . import (
     common,
     cvsps,
 )

 stringio = util.stringio
 checktool = common.checktool
 commit = common.commit
 converter_source = common.converter_source
 makedatetimestamp = common.makedatetimestamp
 NoRepo = common.NoRepo


 class convert_cvs(converter_source):
     def __init__(self, ui, repotype, path, revs=None):
         super(convert_cvs, self).__init__(ui, repotype, path, revs=revs)

         cvs = os.path.join(path, b"CVS")
         if not os.path.exists(cvs):
             raise NoRepo(_(b"%s does not look like a CVS checkout") % path)

         checktool(b'cvs')

         self.changeset = None
         self.files = {}
         self.tags = {}
         self.lastbranch = {}
         self.socket = None
         self.cvsroot = open(os.path.join(cvs, b"Root"), b'rb').read()[:-1]
         self.cvsrepo = open(os.path.join(cvs, b"Repository"), b'rb').read()[:-1]
         self.encoding = encoding.encoding

         self._connect()

     def _parse(self):
         if self.changeset is not None:
             return
         self.changeset = {}

         maxrev = 0
         if self.revs:
             if len(self.revs) > 1:
                 raise error.Abort(
                     _(
                         b'cvs source does not support specifying '
                         b'multiple revs'
                     )
                 )
             # TODO: handle tags
             try:
                 # patchset number?
                 maxrev = int(self.revs[0])
             except ValueError:
                 raise error.Abort(
                     _(b'revision %s is not a patchset number') % self.revs[0]
                 )

         d = encoding.getcwd()
         try:
             os.chdir(self.path)

             cache = b'update'
             if not self.ui.configbool(b'convert', b'cvsps.cache'):
                 cache = None
             db = cvsps.createlog(self.ui, cache=cache)
             db = cvsps.createchangeset(
                 self.ui,
                 db,
                 fuzz=int(self.ui.config(b'convert', b'cvsps.fuzz')),
                 mergeto=self.ui.config(b'convert', b'cvsps.mergeto'),
                 mergefrom=self.ui.config(b'convert', b'cvsps.mergefrom'),
             )

             for cs in db:
                 if maxrev and cs.id > maxrev:
                     break
                 id = b"%d" % cs.id
                 cs.author = self.recode(cs.author)
                 self.lastbranch[cs.branch] = id
                 cs.comment = self.recode(cs.comment)
                 if self.ui.configbool(b'convert', b'localtimezone'):
                     cs.date = makedatetimestamp(cs.date[0])
                 date = dateutil.datestr(cs.date, b'%Y-%m-%d %H:%M:%S %1%2')
                 self.tags.update(dict.fromkeys(cs.tags, id))

                 files = {}
                 for f in cs.entries:
                     files[f.file] = b"%s%s" % (
                         b'.'.join([(b"%d" % x) for x in f.revision]),
                         [b'', b'(DEAD)'][f.dead],
                     )

                 # add current commit to set
                 c = commit(
                     author=cs.author,
                     date=date,
                     parents=[(b"%d" % p.id) for p in cs.parents],
                     desc=cs.comment,
                     branch=cs.branch or b'',
                 )
                 self.changeset[id] = c
                 self.files[id] = files

             self.heads = self.lastbranch.values()
         finally:
             os.chdir(d)

     def _connect(self):
         root = self.cvsroot
         conntype = None
         user, host = None, None
         cmd = [b'cvs', b'server']

         self.ui.status(_(b"connecting to %s\n") % root)

         if root.startswith(b":pserver:"):
             root = root[9:]
             m = re.match(
                 br'(?:(.*?)(?::(.*?))?@)?([^:/]*)(?::(\d*))?(.*)', root
             )
             if m:
                 conntype = b"pserver"
                 user, passw, serv, port, root = m.groups()
                 if not user:
                     user = b"anonymous"
                 if not port:
                     port = 2401
                 else:
                     port = int(port)
                 format0 = b":pserver:%s@%s:%s" % (user, serv, root)
                 format1 = b":pserver:%s@%s:%d%s" % (user, serv, port, root)

                 if not passw:
                     passw = b"A"
                     cvspass = os.path.expanduser(b"~/.cvspass")
                     try:
                         pf = open(cvspass, b'rb')
                         for line in pf.read().splitlines():
                             part1, part2 = line.split(b' ', 1)
                             # /1 :pserver:user@example.com:2401/cvsroot/foo
                             # Ah<Z
                             if part1 == b'/1':
                                 part1, part2 = part2.split(b' ', 1)
                                 format = format1
                             # :pserver:user@example.com:/cvsroot/foo Ah<Z
                             else:
                                 format = format0
                             if part1 == format:
                                 passw = part2
                                 break
                         pf.close()
                     except IOError as inst:
                         if inst.errno != errno.ENOENT:
                             if not getattr(inst, 'filename', None):
                                 inst.filename = cvspass
                             raise

                 sck = socket.socket()
                 sck.connect((serv, port))
                 sck.send(
                     b"\n".join(
                         [
                             b"BEGIN AUTH REQUEST",
                             root,
                             user,
                             passw,
                             b"END AUTH REQUEST",
                             b"",
                         ]
                     )
                 )
                 if sck.recv(128) != b"I LOVE YOU\n":
                     raise error.Abort(_(b"CVS pserver authentication failed"))

                 self.writep = self.readp = sck.makefile('rwb')

         if not conntype and root.startswith(b":local:"):
             conntype = b"local"
             root = root[7:]

         if not conntype:
             # :ext:user@host/home/user/path/to/cvsroot
             if root.startswith(b":ext:"):
                 root = root[5:]
             m = re.match(br'(?:([^@:/]+)@)?([^:/]+):?(.*)', root)
             # Do not take Windows path "c:\foo\bar" for a connection strings
             if os.path.isdir(root) or not m:
                 conntype = b"local"
             else:
                 conntype = b"rsh"
                 user, host, root = m.group(1), m.group(2), m.group(3)

         if conntype != b"pserver":
             if conntype == b"rsh":
                 rsh = encoding.environ.get(b"CVS_RSH") or b"ssh"
                 if user:
                     cmd = [rsh, b'-l', user, host] + cmd
                 else:
                     cmd = [rsh, host] + cmd

             # popen2 does not support argument lists under Windows
             cmd = b' '.join(procutil.shellquote(arg) for arg in cmd)
             self.writep, self.readp = procutil.popen2(cmd)

         self.realroot = root

         self.writep.write(b"Root %s\n" % root)
         self.writep.write(
             b"Valid-responses ok error Valid-requests Mode"
             b" M Mbinary E Checked-in Created Updated"
             b" Merged Removed\n"
         )
         self.writep.write(b"valid-requests\n")
         self.writep.flush()
         r = self.readp.readline()
         if not r.startswith(b"Valid-requests"):
             raise error.Abort(
                 _(
                     b'unexpected response from CVS server '
                     b'(expected "Valid-requests", but got %r)'
                 )
                 % r
             )
         if b"UseUnchanged" in r:
             self.writep.write(b"UseUnchanged\n")
             self.writep.flush()
             self.readp.readline()

     def getheads(self):
         self._parse()
         return self.heads

     def getfile(self, name, rev):
         def chunkedread(fp, count):
             # file-objects returned by socket.makefile() do not handle
             # large read() requests very well.
             chunksize = 65536
             output = stringio()
             while count > 0:
                 data = fp.read(min(count, chunksize))
                 if not data:
                     raise error.Abort(
                         _(b"%d bytes missing from remote file") % count
                     )
                 count -= len(data)
                 output.write(data)
             return output.getvalue()

         self._parse()
         if rev.endswith(b"(DEAD)"):
             return None, None

         args = (b"-N -P -kk -r %s --" % rev).split()
         args.append(self.cvsrepo + b'/' + name)
         for x in args:
             self.writep.write(b"Argument %s\n" % x)
         self.writep.write(b"Directory .\n%s\nco\n" % self.realroot)
         self.writep.flush()

         data = b""
         mode = None
         while True:
             line = self.readp.readline()
             if line.startswith(b"Created ") or line.startswith(b"Updated "):
                 self.readp.readline()  # path
                 self.readp.readline()  # entries
                 mode = self.readp.readline()[:-1]
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
             elif line.startswith(b" "):
                 data += line[1:]
             elif line.startswith(b"M "):
                 pass
             elif line.startswith(b"Mbinary "):
                 count = int(self.readp.readline()[:-1])
                 data = chunkedread(self.readp, count)
             else:
                 if line == b"ok\n":
                     if mode is None:
                         raise error.Abort(_(b'malformed response from CVS'))
                     return (data, b"x" in mode and b"x" or b"")
                 elif line.startswith(b"E "):
                     self.ui.warn(_(b"cvs server: %s\n") % line[2:])
                 elif line.startswith(b"Remove"):
                     self.readp.readline()
                 else:
                     raise error.Abort(_(b"unknown CVS response: %s") % line)

     def getchanges(self, rev, full):
         if full:
             raise error.Abort(_(b"convert from cvs does not support --full"))
         self._parse()
         return sorted(self.files[rev].items()), {}, set()

     def getcommit(self, rev):
         self._parse()
         return self.changeset[rev]

     def gettags(self):
         self._parse()
         return self.tags

     def getchangedfiles(self, rev, i):
         self._parse()
         return sorted(self.files[rev])
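Note that the one getattr call in this file, in the .cvspass error handler, is untouched: it already passes a native-str attribute name, so after the import removal it simply resolves to the builtin. A tiny standalone illustration of the same idiom (the path here is hypothetical, not from the diff):

try:
    open('/nonexistent/.cvspass', 'rb')
except IOError as inst:
    # same idiom as the hunk above: fall back to None when the
    # exception carries no filename attribute
    print(getattr(inst, 'filename', None))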
@@ -1,159 +1,157 @@
 # -*- coding: utf-8 -*-

 # Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
 # This is a stripped-down version of the original bzr-svn transport.py,
 # Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>

 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 2 of the License, or
 # (at your option) any later version.

 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.

 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.

 # pytype: disable=import-error
 import svn.client
 import svn.core
 import svn.ra

 # pytype: enable=import-error

 Pool = svn.core.Pool
 SubversionException = svn.core.SubversionException

-from mercurial.pycompat import getattr
-
 # Some older versions of the Python bindings need to be
 # explicitly initialized. But what we want to do probably
 # won't work worth a darn against those libraries anyway!
 svn.ra.initialize()

 svn_config = None


 def _create_auth_baton(pool):
     """Create a Subversion authentication baton."""
     import svn.client  # pytype: disable=import-error

     # Give the client context baton a suite of authentication
     # providers.h
     providers = [
         svn.client.get_simple_provider(pool),
         svn.client.get_username_provider(pool),
         svn.client.get_ssl_client_cert_file_provider(pool),
         svn.client.get_ssl_client_cert_pw_file_provider(pool),
         svn.client.get_ssl_server_trust_file_provider(pool),
     ]
     # Platform-dependent authentication methods
     getprovider = getattr(
         svn.core, 'svn_auth_get_platform_specific_provider', None
     )
     if getprovider:
         # Available in svn >= 1.6
         for name in (b'gnome_keyring', b'keychain', b'kwallet', b'windows'):
             for type in (b'simple', b'ssl_client_cert_pw', b'ssl_server_trust'):
                 p = getprovider(name, type, pool)
                 if p:
                     providers.append(p)
     else:
         if hasattr(svn.client, 'get_windows_simple_provider'):
             providers.append(svn.client.get_windows_simple_provider(pool))

     return svn.core.svn_auth_open(providers, pool)


 class NotBranchError(SubversionException):
     pass


 class SvnRaTransport:
     """
     Open an ra connection to a Subversion repository.
     """

     def __init__(self, url=b"", ra=None):
         self.pool = Pool()
         self.svn_url = url
         self.username = b''
         self.password = b''

         # Only Subversion 1.4 has reparent()
         if ra is None or not hasattr(svn.ra, 'reparent'):
             self.client = svn.client.create_context(self.pool)
             ab = _create_auth_baton(self.pool)
             self.client.auth_baton = ab
             global svn_config
             if svn_config is None:
                 svn_config = svn.core.svn_config_get_config(None)
             self.client.config = svn_config
             try:
                 self.ra = svn.client.open_ra_session(
                     self.svn_url, self.client, self.pool
                 )
             except SubversionException as xxx_todo_changeme:
                 (inst, num) = xxx_todo_changeme.args
                 if num in (
                     svn.core.SVN_ERR_RA_ILLEGAL_URL,
                     svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
                     svn.core.SVN_ERR_BAD_URL,
                 ):
                     raise NotBranchError(url)
                 raise
         else:
             self.ra = ra
             svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))

     class Reporter:
         def __init__(self, reporter_data):
             self._reporter, self._baton = reporter_data

         def set_path(self, path, revnum, start_empty, lock_token, pool=None):
             svn.ra.reporter2_invoke_set_path(
                 self._reporter,
                 self._baton,
                 path,
                 revnum,
                 start_empty,
                 lock_token,
                 pool,
             )

         def delete_path(self, path, pool=None):
             svn.ra.reporter2_invoke_delete_path(
                 self._reporter, self._baton, path, pool
             )

         def link_path(
             self, path, url, revision, start_empty, lock_token, pool=None
         ):
             svn.ra.reporter2_invoke_link_path(
                 self._reporter,
                 self._baton,
                 path,
                 url,
                 revision,
                 start_empty,
                 lock_token,
                 pool,
             )

         def finish_report(self, pool=None):
             svn.ra.reporter2_invoke_finish_report(
                 self._reporter, self._baton, pool
             )

         def abort_report(self, pool=None):
             svn.ra.reporter2_invoke_abort_report(
                 self._reporter, self._baton, pool
             )

     def do_update(self, revnum, path, *args, **kwargs):
         return self.Reporter(
             svn.ra.do_update(self.ra, revnum, path, *args, **kwargs)
         )
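This file keeps using the getattr/hasattr feature-detection idiom against the Subversion bindings; only the import changed. The idiom itself, reduced to a runnable sketch with a stand-in module (math here, since the svn bindings are not importable outside a Subversion build):

import math

# Probe for an API that may or may not exist, as transport.py does for
# svn_auth_get_platform_specific_provider:
isqrt = getattr(math, 'isqrt', None)
if isqrt is not None:
    print(isqrt(17))           # available on Python >= 3.8
elif hasattr(math, 'sqrt'):
    print(int(math.sqrt(17)))  # fallback probe, mirroring the hasattr branch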
@@ -1,161 +1,160 @@
 # factotum.py - Plan 9 factotum integration for Mercurial
 #
 # Copyright (C) 2012 Steven Stallion <sstallion@gmail.com>
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms of the GNU General Public License as published by the
 # Free Software Foundation; either version 2 of the License, or (at your
 # option) any later version.
 #
 # This program is distributed in the hope that it will be useful, but
 # WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
 # Public License for more details.
 #
 # You should have received a copy of the GNU General Public License along
 # with this program; if not, write to the Free Software Foundation, Inc.,
 # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

 '''http authentication with factotum

 This extension allows the factotum(4) facility on Plan 9 from Bell Labs
 platforms to provide authentication information for HTTP access. Configuration
 entries specified in the auth section as well as authentication information
 provided in the repository URL are fully supported. If no prefix is specified,
 a value of "*" will be assumed.

 By default, keys are specified as::

   proto=pass service=hg prefix=<prefix> user=<username> !password=<password>

 If the factotum extension is unable to read the required key, one will be
 requested interactively.

 A configuration section is available to customize runtime behavior. By
 default, these entries are::

   [factotum]
   executable = /bin/auth/factotum
   mountpoint = /mnt/factotum
   service = hg

 The executable entry defines the full path to the factotum binary. The
 mountpoint entry defines the path to the factotum file service. Lastly, the
 service entry controls the service name used when reading keys.

 '''


 import os
 from mercurial.i18n import _
-from mercurial.pycompat import setattr
 from mercurial.utils import procutil
 from mercurial import (
     error,
     httpconnection,
     registrar,
     url,
     util,
 )

 urlreq = util.urlreq
 passwordmgr = url.passwordmgr

 ERRMAX = 128

 _executable = _mountpoint = _service = None

 configtable = {}
 configitem = registrar.configitem(configtable)

 configitem(
     b'factotum',
     b'executable',
     default=b'/bin/auth/factotum',
 )
 configitem(
     b'factotum',
     b'mountpoint',
     default=b'/mnt/factotum',
 )
 configitem(
     b'factotum',
     b'service',
     default=b'hg',
 )


 def auth_getkey(self, params):
     if not self.ui.interactive():
         raise error.Abort(_(b'factotum not interactive'))
     if b'user=' not in params:
         params = b'%s user?' % params
     params = b'%s !password?' % params
     os.system(procutil.tonativestr(b"%s -g '%s'" % (_executable, params)))


 def auth_getuserpasswd(self, getkey, params):
     params = b'proto=pass %s' % params
     while True:
         fd = os.open(b'%s/rpc' % _mountpoint, os.O_RDWR)
         try:
             os.write(fd, b'start %s' % params)
             l = os.read(fd, ERRMAX).split()
             if l[0] == b'ok':
                 os.write(fd, b'read')
                 status, user, passwd = os.read(fd, ERRMAX).split(None, 2)
                 if status == b'ok':
                     if passwd.startswith(b"'"):
                         if passwd.endswith(b"'"):
                             passwd = passwd[1:-1].replace(b"''", b"'")
                         else:
                             raise error.Abort(_(b'malformed password string'))
                     return (user, passwd)
         except (OSError, IOError):
             raise error.Abort(_(b'factotum not responding'))
         finally:
             os.close(fd)
         getkey(self, params)


 def monkeypatch_method(cls):
     def decorator(func):
         setattr(cls, func.__name__, func)
         return func

     return decorator


 @monkeypatch_method(passwordmgr)
 def find_user_password(self, realm, authuri):
     user, passwd = self.passwddb.find_user_password(realm, authuri)
     if user and passwd:
         self._writedebug(user, passwd)
         return (user, passwd)

     prefix = b''
     res = httpconnection.readauthforuri(self.ui, authuri, user)
     if res:
         _, auth = res
         prefix = auth.get(b'prefix')
         user, passwd = auth.get(b'username'), auth.get(b'password')
     if not user or not passwd:
         if not prefix:
             prefix = realm.split(b' ')[0].lower()
         params = b'service=%s prefix=%s' % (_service, prefix)
         if user:
             params = b'%s user=%s' % (params, user)
         user, passwd = auth_getuserpasswd(self, auth_getkey, params)

     self.add_password(realm, authuri, user, passwd)
     self._writedebug(user, passwd)
     return (user, passwd)


 def uisetup(ui):
     global _executable
     _executable = ui.config(b'factotum', b'executable')
     global _mountpoint
     _mountpoint = ui.config(b'factotum', b'mountpoint')
     global _service
     _service = ui.config(b'factotum', b'service')
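The monkeypatch_method decorator above is the only setattr user in this file, and it already passed a native-str name (func.__name__), so the builtin is a drop-in. A self-contained sketch of the same pattern with a toy class (Greeter is invented for illustration, standing in for urllib's password manager):

def monkeypatch_method(cls):
    def decorator(func):
        setattr(cls, func.__name__, func)  # builtin setattr, str name
        return func
    return decorator

class Greeter:
    pass

@monkeypatch_method(Greeter)
def greet(self):
    return 'hello'

print(Greeter().greet())  # -> hello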
@@ -1,860 +1,858 b''
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # context: context needed to annotate a file
3 # context: context needed to annotate a file
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import collections
9 import collections
10 import contextlib
10 import contextlib
11 import os
11 import os
12
12
13 from mercurial.i18n import _
13 from mercurial.i18n import _
14 from mercurial.pycompat import (
14 from mercurial.pycompat import (
15 getattr,
16 open,
15 open,
17 setattr,
18 )
16 )
19 from mercurial.node import (
17 from mercurial.node import (
20 bin,
18 bin,
21 hex,
19 hex,
22 short,
20 short,
23 )
21 )
24 from mercurial import (
22 from mercurial import (
25 error,
23 error,
26 linelog as linelogmod,
24 linelog as linelogmod,
27 lock as lockmod,
25 lock as lockmod,
28 mdiff,
26 mdiff,
29 pycompat,
27 pycompat,
30 scmutil,
28 scmutil,
31 util,
29 util,
32 )
30 )
33 from mercurial.utils import (
31 from mercurial.utils import (
34 hashutil,
32 hashutil,
35 stringutil,
33 stringutil,
36 )
34 )
37
35
38 from . import (
36 from . import (
39 error as faerror,
37 error as faerror,
40 revmap as revmapmod,
38 revmap as revmapmod,
41 )
39 )
42
40
43 # given path, get filelog, cached
41 # given path, get filelog, cached
44 @util.lrucachefunc
42 @util.lrucachefunc
45 def _getflog(repo, path):
43 def _getflog(repo, path):
46 return repo.file(path)
44 return repo.file(path)
47
45
48
46
49 # extracted from mercurial.context.basefilectx.annotate
47 # extracted from mercurial.context.basefilectx.annotate
50 def _parents(f, follow=True):
48 def _parents(f, follow=True):
51 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
49 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
52 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
50 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
53 # from the topmost introrev (= srcrev) down to p.linkrev() if it
51 # from the topmost introrev (= srcrev) down to p.linkrev() if it
54 # isn't an ancestor of the srcrev.
52 # isn't an ancestor of the srcrev.
55 f._changeid
53 f._changeid
56 pl = f.parents()
54 pl = f.parents()
57
55
58 # Don't return renamed parents if we aren't following.
56 # Don't return renamed parents if we aren't following.
59 if not follow:
57 if not follow:
60 pl = [p for p in pl if p.path() == f.path()]
58 pl = [p for p in pl if p.path() == f.path()]
61
59
62 # renamed filectx won't have a filelog yet, so set it
60 # renamed filectx won't have a filelog yet, so set it
63 # from the cache to save time
61 # from the cache to save time
64 for p in pl:
62 for p in pl:
65 if not '_filelog' in p.__dict__:
63 if not '_filelog' in p.__dict__:
66 p._filelog = _getflog(f._repo, p.path())
64 p._filelog = _getflog(f._repo, p.path())
67
65
68 return pl
66 return pl
69
67
70
68
71 # extracted from mercurial.context.basefilectx.annotate. slightly modified
69 # extracted from mercurial.context.basefilectx.annotate. slightly modified
72 # so it takes a fctx instead of a pair of text and fctx.
70 # so it takes a fctx instead of a pair of text and fctx.
73 def _decorate(fctx):
71 def _decorate(fctx):
74 text = fctx.data()
72 text = fctx.data()
75 linecount = text.count(b'\n')
73 linecount = text.count(b'\n')
76 if text and not text.endswith(b'\n'):
74 if text and not text.endswith(b'\n'):
77 linecount += 1
75 linecount += 1
78 return ([(fctx, i) for i in range(linecount)], text)
76 return ([(fctx, i) for i in range(linecount)], text)
79
77
80
78
81 # extracted from mercurial.context.basefilectx.annotate. slightly modified
79 # extracted from mercurial.context.basefilectx.annotate. slightly modified
82 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
80 # so it takes an extra "blocks" parameter calculated elsewhere, instead of
83 # calculating diff here.
81 # calculating diff here.
84 def _pair(parent, child, blocks):
82 def _pair(parent, child, blocks):
85 for (a1, a2, b1, b2), t in blocks:
83 for (a1, a2, b1, b2), t in blocks:
86 # Changed blocks ('!') or blocks made only of blank lines ('~')
84 # Changed blocks ('!') or blocks made only of blank lines ('~')
87 # belong to the child.
85 # belong to the child.
88 if t == b'=':
86 if t == b'=':
89 child[0][b1:b2] = parent[0][a1:a2]
87 child[0][b1:b2] = parent[0][a1:a2]
90 return child
88 return child
91
89
92
90
93 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
91 # like scmutil.revsingle, but with lru cache, so their states (like manifests)
94 # could be reused
92 # could be reused
95 _revsingle = util.lrucachefunc(scmutil.revsingle)
93 _revsingle = util.lrucachefunc(scmutil.revsingle)
96
94
97
95
98 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
96 def resolvefctx(repo, rev, path, resolverev=False, adjustctx=None):
99 """(repo, str, str) -> fctx
97 """(repo, str, str) -> fctx
100
98
101 get the filectx object from repo, rev, path, in an efficient way.
99 get the filectx object from repo, rev, path, in an efficient way.
102
100
103 if resolverev is True, "rev" is a revision specified by the revset
101 if resolverev is True, "rev" is a revision specified by the revset
104 language, otherwise "rev" is a nodeid, or a revision number that can
102 language, otherwise "rev" is a nodeid, or a revision number that can
105 be consumed by repo.__getitem__.
103 be consumed by repo.__getitem__.
106
104
107 if adjustctx is not None, the returned fctx will point to a changeset
105 if adjustctx is not None, the returned fctx will point to a changeset
108 that introduces the change (last modified the file). if adjustctx
106 that introduces the change (last modified the file). if adjustctx
109 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
107 is 'linkrev', trust the linkrev and do not adjust it. this is noticeably
110 faster for big repos but is incorrect for some cases.
108 faster for big repos but is incorrect for some cases.
111 """
109 """
112 if resolverev and not isinstance(rev, int) and rev is not None:
110 if resolverev and not isinstance(rev, int) and rev is not None:
113 ctx = _revsingle(repo, rev)
111 ctx = _revsingle(repo, rev)
114 else:
112 else:
115 ctx = repo[rev]
113 ctx = repo[rev]
116
114
117 # If we don't need to adjust the linkrev, create the filectx using the
115 # If we don't need to adjust the linkrev, create the filectx using the
118 # changectx instead of using ctx[path]. This means it already has the
116 # changectx instead of using ctx[path]. This means it already has the
119 # changectx information, so blame -u will be able to look directly at the
117 # changectx information, so blame -u will be able to look directly at the
120 # commitctx object instead of having to resolve it by going through the
118 # commitctx object instead of having to resolve it by going through the
121 # manifest. In a lazy-manifest world this can prevent us from downloading a
119 # manifest. In a lazy-manifest world this can prevent us from downloading a
122 # lot of data.
120 # lot of data.
123 if adjustctx is None:
121 if adjustctx is None:
124 # ctx.rev() is None means it's the working copy, which is a special
122 # ctx.rev() is None means it's the working copy, which is a special
125 # case.
123 # case.
126 if ctx.rev() is None:
124 if ctx.rev() is None:
127 fctx = ctx[path]
125 fctx = ctx[path]
128 else:
126 else:
129 fctx = repo.filectx(path, changeid=ctx.rev())
127 fctx = repo.filectx(path, changeid=ctx.rev())
130 else:
128 else:
131 fctx = ctx[path]
129 fctx = ctx[path]
132 if adjustctx == b'linkrev':
130 if adjustctx == b'linkrev':
133 introrev = fctx.linkrev()
131 introrev = fctx.linkrev()
134 else:
132 else:
135 introrev = fctx.introrev()
133 introrev = fctx.introrev()
136 if introrev != ctx.rev():
134 if introrev != ctx.rev():
137 fctx._changeid = introrev
135 fctx._changeid = introrev
138 fctx._changectx = repo[introrev]
136 fctx._changectx = repo[introrev]
139 return fctx
137 return fctx
140
138
141
139
142 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
140 # like mercurial.store.encodedir, but use linelog suffixes: .m, .l, .lock
143 def encodedir(path):
141 def encodedir(path):
144 return (
142 return (
145 path.replace(b'.hg/', b'.hg.hg/')
143 path.replace(b'.hg/', b'.hg.hg/')
146 .replace(b'.l/', b'.l.hg/')
144 .replace(b'.l/', b'.l.hg/')
147 .replace(b'.m/', b'.m.hg/')
145 .replace(b'.m/', b'.m.hg/')
148 .replace(b'.lock/', b'.lock.hg/')
146 .replace(b'.lock/', b'.lock.hg/')
149 )
147 )
150
148
151
149
152 def hashdiffopts(diffopts):
150 def hashdiffopts(diffopts):
153 diffoptstr = stringutil.pprint(
151 diffoptstr = stringutil.pprint(
154 sorted(
152 sorted(
155 (k, getattr(diffopts, pycompat.sysstr(k)))
153 (k, getattr(diffopts, pycompat.sysstr(k)))
156 for k in mdiff.diffopts.defaults
154 for k in mdiff.diffopts.defaults
157 )
155 )
158 )
156 )
159 return hex(hashutil.sha1(diffoptstr).digest())[:6]
157 return hex(hashutil.sha1(diffoptstr).digest())[:6]
160
158
161
159
162 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
160 _defaultdiffopthash = hashdiffopts(mdiff.defaultopts)
163
161
164
162
165 class annotateopts:
163 class annotateopts:
166 """like mercurial.mdiff.diffopts, but is for annotate
164 """like mercurial.mdiff.diffopts, but is for annotate
167
165
168 followrename: follow renames, like "hg annotate -f"
166 followrename: follow renames, like "hg annotate -f"
169 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
167 followmerge: follow p2 of a merge changeset, otherwise p2 is ignored
170 """
168 """
171
169
172 defaults = {
170 defaults = {
173 'diffopts': None,
171 'diffopts': None,
174 'followrename': True,
172 'followrename': True,
175 'followmerge': True,
173 'followmerge': True,
176 }
174 }
177
175
178 def __init__(self, **opts):
176 def __init__(self, **opts):
179 for k, v in self.defaults.items():
177 for k, v in self.defaults.items():
180 setattr(self, k, opts.get(k, v))
178 setattr(self, k, opts.get(k, v))
181
179
182 @util.propertycache
180 @util.propertycache
183 def shortstr(self):
181 def shortstr(self):
184 """represent opts in a short string, suitable for a directory name"""
182 """represent opts in a short string, suitable for a directory name"""
185 result = b''
183 result = b''
186 if not self.followrename:
184 if not self.followrename:
187 result += b'r0'
185 result += b'r0'
188 if not self.followmerge:
186 if not self.followmerge:
189 result += b'm0'
187 result += b'm0'
190 if self.diffopts is not None:
188 if self.diffopts is not None:
191 assert isinstance(self.diffopts, mdiff.diffopts)
189 assert isinstance(self.diffopts, mdiff.diffopts)
192 diffopthash = hashdiffopts(self.diffopts)
190 diffopthash = hashdiffopts(self.diffopts)
193 if diffopthash != _defaultdiffopthash:
191 if diffopthash != _defaultdiffopthash:
194 result += b'i' + diffopthash
192 result += b'i' + diffopthash
195 return result or b'default'
193 return result or b'default'
196
194
197
195
198 defaultopts = annotateopts()
196 defaultopts = annotateopts()
199
197
200
198
201 class _annotatecontext:
199 class _annotatecontext:
202 """do not use this class directly as it does not use lock to protect
200 """do not use this class directly as it does not use lock to protect
203 writes. use "with annotatecontext(...)" instead.
201 writes. use "with annotatecontext(...)" instead.
204 """
202 """
205
203
206 def __init__(self, repo, path, linelogpath, revmappath, opts):
204 def __init__(self, repo, path, linelogpath, revmappath, opts):
207 self.repo = repo
205 self.repo = repo
208 self.ui = repo.ui
206 self.ui = repo.ui
209 self.path = path
207 self.path = path
210 self.opts = opts
208 self.opts = opts
211 self.linelogpath = linelogpath
209 self.linelogpath = linelogpath
212 self.revmappath = revmappath
210 self.revmappath = revmappath
213 self._linelog = None
211 self._linelog = None
214 self._revmap = None
212 self._revmap = None
215 self._node2path = {} # {str: str}
213 self._node2path = {} # {str: str}
216
214
217 @property
215 @property
218 def linelog(self):
216 def linelog(self):
219 if self._linelog is None:
217 if self._linelog is None:
220 if os.path.exists(self.linelogpath):
218 if os.path.exists(self.linelogpath):
221 with open(self.linelogpath, b'rb') as f:
219 with open(self.linelogpath, b'rb') as f:
222 try:
220 try:
223 self._linelog = linelogmod.linelog.fromdata(f.read())
221 self._linelog = linelogmod.linelog.fromdata(f.read())
224 except linelogmod.LineLogError:
222 except linelogmod.LineLogError:
225 self._linelog = linelogmod.linelog()
223 self._linelog = linelogmod.linelog()
226 else:
224 else:
227 self._linelog = linelogmod.linelog()
225 self._linelog = linelogmod.linelog()
228 return self._linelog
226 return self._linelog
229
227
230 @property
228 @property
231 def revmap(self):
229 def revmap(self):
232 if self._revmap is None:
230 if self._revmap is None:
233 self._revmap = revmapmod.revmap(self.revmappath)
231 self._revmap = revmapmod.revmap(self.revmappath)
234 return self._revmap
232 return self._revmap
235
233
236 def close(self):
234 def close(self):
237 if self._revmap is not None:
235 if self._revmap is not None:
238 self._revmap.flush()
236 self._revmap.flush()
239 self._revmap = None
237 self._revmap = None
240 if self._linelog is not None:
238 if self._linelog is not None:
241 with open(self.linelogpath, b'wb') as f:
239 with open(self.linelogpath, b'wb') as f:
242 f.write(self._linelog.encode())
240 f.write(self._linelog.encode())
243 self._linelog = None
241 self._linelog = None
244
242
245 __del__ = close
243 __del__ = close
246
244
247 def rebuild(self):
245 def rebuild(self):
248 """delete linelog and revmap, useful for rebuilding"""
246 """delete linelog and revmap, useful for rebuilding"""
249 self.close()
247 self.close()
250 self._node2path.clear()
248 self._node2path.clear()
251 _unlinkpaths([self.revmappath, self.linelogpath])
249 _unlinkpaths([self.revmappath, self.linelogpath])
252
250
253 @property
251 @property
254 def lastnode(self):
252 def lastnode(self):
255 """return last node in revmap, or None if revmap is empty"""
253 """return last node in revmap, or None if revmap is empty"""
256 if self._revmap is None:
254 if self._revmap is None:
257 # fast path, read revmap without loading its full content
255 # fast path, read revmap without loading its full content
258 return revmapmod.getlastnode(self.revmappath)
256 return revmapmod.getlastnode(self.revmappath)
259 else:
257 else:
260 return self._revmap.rev2hsh(self._revmap.maxrev)
258 return self._revmap.rev2hsh(self._revmap.maxrev)
261
259
262 def isuptodate(self, master, strict=True):
260 def isuptodate(self, master, strict=True):
263 """return True if the revmap / linelog is up-to-date, or the file
261 """return True if the revmap / linelog is up-to-date, or the file
264 does not exist in the master revision. False otherwise.
262 does not exist in the master revision. False otherwise.
265
263
266 it tries to be fast and could return false negatives, because of the
264 it tries to be fast and could return false negatives, because of the
267 use of linkrev instead of introrev.
265 use of linkrev instead of introrev.
268
266
269 useful for both server and client to decide whether to update
267 useful for both server and client to decide whether to update
270 fastannotate cache or not.
268 fastannotate cache or not.
271
269
272 if strict is True, even if fctx exists in the revmap, but is not the
270 if strict is True, even if fctx exists in the revmap, but is not the
273 last node, isuptodate will return False. it's good for performance - no
271 last node, isuptodate will return False. it's good for performance - no
274 expensive check was done.
272 expensive check was done.
275
273
276 if strict is False, if fctx exists in the revmap, this function may
274 if strict is False, if fctx exists in the revmap, this function may
277 return True. this is useful for the client to skip downloading the
275 return True. this is useful for the client to skip downloading the
278 cache if the client's master is behind the server's.
276 cache if the client's master is behind the server's.
279 """
277 """
280 lastnode = self.lastnode
278 lastnode = self.lastnode
281 try:
279 try:
282 f = self._resolvefctx(master, resolverev=True)
280 f = self._resolvefctx(master, resolverev=True)
283 # choose linkrev instead of introrev as the check is meant to be
281 # choose linkrev instead of introrev as the check is meant to be
284 # *fast*.
282 # *fast*.
285 linknode = self.repo.changelog.node(f.linkrev())
283 linknode = self.repo.changelog.node(f.linkrev())
286 if not strict and lastnode and linknode != lastnode:
284 if not strict and lastnode and linknode != lastnode:
287 # check if f.node() is in the revmap. note: this loads the
285 # check if f.node() is in the revmap. note: this loads the
288 # revmap and can be slow.
286 # revmap and can be slow.
289 return self.revmap.hsh2rev(linknode) is not None
287 return self.revmap.hsh2rev(linknode) is not None
290 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
288 # avoid resolving old manifest, or slow adjustlinkrev to be fast,
291 # false negatives are acceptable in this case.
289 # false negatives are acceptable in this case.
292 return linknode == lastnode
290 return linknode == lastnode
293 except LookupError:
291 except LookupError:
294 # master does not have the file, or the revmap is ahead
292 # master does not have the file, or the revmap is ahead
295 return True
293 return True
296
294
297 def annotate(self, rev, master=None, showpath=False, showlines=False):
295 def annotate(self, rev, master=None, showpath=False, showlines=False):
298 """incrementally update the cache so it includes revisions in the main
296 """incrementally update the cache so it includes revisions in the main
299 branch till 'master'. and run annotate on 'rev', which may or may not be
297 branch till 'master'. and run annotate on 'rev', which may or may not be
300 included in the main branch.
298 included in the main branch.
301
299
302 if master is None, do not update linelog.
300 if master is None, do not update linelog.
303
301
304 the first value returned is the annotate result, it is [(node, linenum)]
302 the first value returned is the annotate result, it is [(node, linenum)]
305 by default. [(node, linenum, path)] if showpath is True.
303 by default. [(node, linenum, path)] if showpath is True.
306
304
307 if showlines is True, a second value will be returned, it is a list of
305 if showlines is True, a second value will be returned, it is a list of
308 corresponding line contents.
306 corresponding line contents.
309 """
307 """
310
308
311 # the fast path test requires commit hash, convert rev number to hash,
309 # the fast path test requires commit hash, convert rev number to hash,
312 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
310 # so it may hit the fast path. note: in the "fctx" mode, the "annotate"
313 # command could give us a revision number even if the user passes a
311 # command could give us a revision number even if the user passes a
314 # commit hash.
312 # commit hash.
315 if isinstance(rev, int):
313 if isinstance(rev, int):
316 rev = hex(self.repo.changelog.node(rev))
314 rev = hex(self.repo.changelog.node(rev))
317
315
318 # fast path: if rev is in the main branch already
316 # fast path: if rev is in the main branch already
319 directly, revfctx = self.canannotatedirectly(rev)
317 directly, revfctx = self.canannotatedirectly(rev)
320 if directly:
318 if directly:
321 if self.ui.debugflag:
319 if self.ui.debugflag:
322 self.ui.debug(
320 self.ui.debug(
323 b'fastannotate: %s: using fast path '
321 b'fastannotate: %s: using fast path '
324 b'(resolved fctx: %s)\n'
322 b'(resolved fctx: %s)\n'
325 % (
323 % (
326 self.path,
324 self.path,
327 stringutil.pprint(hasattr(revfctx, 'node')),
325 stringutil.pprint(hasattr(revfctx, 'node')),
328 )
326 )
329 )
327 )
330 return self.annotatedirectly(revfctx, showpath, showlines)
328 return self.annotatedirectly(revfctx, showpath, showlines)
331
329
332 # resolve master
330 # resolve master
333 masterfctx = None
331 masterfctx = None
334 if master:
332 if master:
335 try:
333 try:
336 masterfctx = self._resolvefctx(
334 masterfctx = self._resolvefctx(
337 master, resolverev=True, adjustctx=True
335 master, resolverev=True, adjustctx=True
338 )
336 )
339 except LookupError: # master does not have the file
337 except LookupError: # master does not have the file
340 pass
338 pass
341 else:
339 else:
342 if masterfctx in self.revmap: # no need to update linelog
340 if masterfctx in self.revmap: # no need to update linelog
343 masterfctx = None
341 masterfctx = None
344
342
345 # ... - @ <- rev (can be an arbitrary changeset,
343 # ... - @ <- rev (can be an arbitrary changeset,
346 # / not necessarily a descendant
344 # / not necessarily a descendant
347 # master -> o of master)
345 # master -> o of master)
348 # |
346 # |
349 # a merge -> o 'o': new changesets in the main branch
347 # a merge -> o 'o': new changesets in the main branch
350 # |\ '#': revisions in the main branch that
348 # |\ '#': revisions in the main branch that
351 # o * exist in linelog / revmap
349 # o * exist in linelog / revmap
352 # | . '*': changesets in side branches, or
350 # | . '*': changesets in side branches, or
353 # last master -> # . descendants of master
351 # last master -> # . descendants of master
354 # | .
352 # | .
355 # # * joint: '#', and is a parent of a '*'
353 # # * joint: '#', and is a parent of a '*'
356 # |/
354 # |/
357 # a joint -> # ^^^^ --- side branches
355 # a joint -> # ^^^^ --- side branches
358 # |
356 # |
359 # ^ --- main branch (in linelog)
357 # ^ --- main branch (in linelog)
360
358
361 # these DFSes are similar to the traditional annotate algorithm.
359 # these DFSes are similar to the traditional annotate algorithm.
362 # we cannot really reuse the code, for perf reasons.
360 # we cannot really reuse the code, for perf reasons.
363
361
364 # 1st DFS calculates merges, joint points, and needed.
362 # 1st DFS calculates merges, joint points, and needed.
365 # "needed" is a simple reference counting dict to free items in
363 # "needed" is a simple reference counting dict to free items in
366 # "hist", reducing its memory usage otherwise could be huge.
364 # "hist", reducing its memory usage otherwise could be huge.
367 initvisit = [revfctx]
365 initvisit = [revfctx]
368 if masterfctx:
366 if masterfctx:
369 if masterfctx.rev() is None:
367 if masterfctx.rev() is None:
370 raise error.Abort(
368 raise error.Abort(
371 _(b'cannot update linelog to wdir()'),
369 _(b'cannot update linelog to wdir()'),
372 hint=_(b'set fastannotate.mainbranch'),
370 hint=_(b'set fastannotate.mainbranch'),
373 )
371 )
374 initvisit.append(masterfctx)
372 initvisit.append(masterfctx)
375 visit = initvisit[:]
373 visit = initvisit[:]
376 pcache = {}
374 pcache = {}
377 needed = {revfctx: 1}
375 needed = {revfctx: 1}
378 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
376 hist = {} # {fctx: ([(llrev or fctx, linenum)], text)}
379 while visit:
377 while visit:
380 f = visit.pop()
378 f = visit.pop()
381 if f in pcache or f in hist:
379 if f in pcache or f in hist:
382 continue
380 continue
383 if f in self.revmap: # in the old main branch, it's a joint
381 if f in self.revmap: # in the old main branch, it's a joint
384 llrev = self.revmap.hsh2rev(f.node())
382 llrev = self.revmap.hsh2rev(f.node())
385 self.linelog.annotate(llrev)
383 self.linelog.annotate(llrev)
386 result = self.linelog.annotateresult
384 result = self.linelog.annotateresult
387 hist[f] = (result, f.data())
385 hist[f] = (result, f.data())
388 continue
386 continue
389 pl = self._parentfunc(f)
387 pl = self._parentfunc(f)
390 pcache[f] = pl
388 pcache[f] = pl
391 for p in pl:
389 for p in pl:
392 needed[p] = needed.get(p, 0) + 1
390 needed[p] = needed.get(p, 0) + 1
393 if p not in pcache:
391 if p not in pcache:
394 visit.append(p)
392 visit.append(p)
395
393
396 # 2nd (simple) DFS calculates new changesets in the main branch
394 # 2nd (simple) DFS calculates new changesets in the main branch
397 # ('o' nodes in the above graph), so we know when to update linelog.
395 # ('o' nodes in the above graph), so we know when to update linelog.
398 newmainbranch = set()
396 newmainbranch = set()
399 f = masterfctx
397 f = masterfctx
400 while f and f not in self.revmap:
398 while f and f not in self.revmap:
401 newmainbranch.add(f)
399 newmainbranch.add(f)
402 pl = pcache[f]
400 pl = pcache[f]
403 if pl:
401 if pl:
404 f = pl[0]
402 f = pl[0]
405 else:
403 else:
406 f = None
404 f = None
407 break
405 break
408
406
409 # f, if present, is the position where the last build stopped, and
407 # f, if present, is the position where the last build stopped, and
410 # should be the "master" last time. check to see if we can continue
408 # should be the "master" last time. check to see if we can continue
411 # building the linelog incrementally. (we cannot if diverged)
409 # building the linelog incrementally. (we cannot if diverged)
412 if masterfctx is not None:
410 if masterfctx is not None:
413 self._checklastmasterhead(f)
411 self._checklastmasterhead(f)
414
412
415 if self.ui.debugflag:
413 if self.ui.debugflag:
416 if newmainbranch:
414 if newmainbranch:
417 self.ui.debug(
415 self.ui.debug(
418 b'fastannotate: %s: %d new changesets in the main'
416 b'fastannotate: %s: %d new changesets in the main'
419 b' branch\n' % (self.path, len(newmainbranch))
417 b' branch\n' % (self.path, len(newmainbranch))
420 )
418 )
421 elif not hist: # no joints, no updates
419 elif not hist: # no joints, no updates
422 self.ui.debug(
420 self.ui.debug(
423 b'fastannotate: %s: linelog cannot help in '
421 b'fastannotate: %s: linelog cannot help in '
424 b'annotating this revision\n' % self.path
422 b'annotating this revision\n' % self.path
425 )
423 )
426
424
427 # prepare annotateresult so we can update linelog incrementally
425 # prepare annotateresult so we can update linelog incrementally
428 self.linelog.annotate(self.linelog.maxrev)
426 self.linelog.annotate(self.linelog.maxrev)
429
427
430 # 3rd DFS does the actual annotate
428 # 3rd DFS does the actual annotate
431 visit = initvisit[:]
429 visit = initvisit[:]
432 progress = self.ui.makeprogress(
430 progress = self.ui.makeprogress(
433 b'building cache', total=len(newmainbranch)
431 b'building cache', total=len(newmainbranch)
434 )
432 )
435 while visit:
433 while visit:
436 f = visit[-1]
434 f = visit[-1]
437 if f in hist:
435 if f in hist:
438 visit.pop()
436 visit.pop()
439 continue
437 continue
440
438
441 ready = True
439 ready = True
442 pl = pcache[f]
440 pl = pcache[f]
443 for p in pl:
441 for p in pl:
444 if p not in hist:
442 if p not in hist:
445 ready = False
443 ready = False
446 visit.append(p)
444 visit.append(p)
447 if not ready:
445 if not ready:
448 continue
446 continue
449
447
450 visit.pop()
448 visit.pop()
451 blocks = None # mdiff blocks, used for appending linelog
449 blocks = None # mdiff blocks, used for appending linelog
452 ismainbranch = f in newmainbranch
450 ismainbranch = f in newmainbranch
453 # curr is the same as in the traditional annotate algorithm;
451 # curr is the same as in the traditional annotate algorithm;
454 # if we only care about linear history (do not follow merge),
452 # if we only care about linear history (do not follow merge),
455 # then curr is not actually used.
453 # then curr is not actually used.
456 assert f not in hist
454 assert f not in hist
457 curr = _decorate(f)
455 curr = _decorate(f)
458 for i, p in enumerate(pl):
456 for i, p in enumerate(pl):
459 bs = list(self._diffblocks(hist[p][1], curr[1]))
457 bs = list(self._diffblocks(hist[p][1], curr[1]))
460 if i == 0 and ismainbranch:
458 if i == 0 and ismainbranch:
461 blocks = bs
459 blocks = bs
462 curr = _pair(hist[p], curr, bs)
460 curr = _pair(hist[p], curr, bs)
463 if needed[p] == 1:
461 if needed[p] == 1:
464 del hist[p]
462 del hist[p]
465 del needed[p]
463 del needed[p]
466 else:
464 else:
467 needed[p] -= 1
465 needed[p] -= 1
468
466
469 hist[f] = curr
467 hist[f] = curr
470 del pcache[f]
468 del pcache[f]
471
469
472 if ismainbranch: # need to write to linelog
470 if ismainbranch: # need to write to linelog
473 progress.increment()
471 progress.increment()
474 bannotated = None
472 bannotated = None
475 if len(pl) == 2 and self.opts.followmerge: # merge
473 if len(pl) == 2 and self.opts.followmerge: # merge
476 bannotated = curr[0]
474 bannotated = curr[0]
477 if blocks is None: # no parents, add an empty one
475 if blocks is None: # no parents, add an empty one
478 blocks = list(self._diffblocks(b'', curr[1]))
476 blocks = list(self._diffblocks(b'', curr[1]))
479 self._appendrev(f, blocks, bannotated)
477 self._appendrev(f, blocks, bannotated)
480 elif showpath: # not appending to linelog, but we need to record the path
478 elif showpath: # not appending to linelog, but we need to record the path
481 self._node2path[f.node()] = f.path()
479 self._node2path[f.node()] = f.path()
482
480
483 progress.complete()
481 progress.complete()
484
482
485 result = [
483 result = [
486 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
484 ((self.revmap.rev2hsh(fr) if isinstance(fr, int) else fr.node()), l)
487 for fr, l in hist[revfctx][0]
485 for fr, l in hist[revfctx][0]
488 ] # [(node, linenumber)]
486 ] # [(node, linenumber)]
489 return self._refineannotateresult(result, revfctx, showpath, showlines)
487 return self._refineannotateresult(result, revfctx, showpath, showlines)
490
488
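# A minimal sketch (illustrative, not part of the patch) of the reference
# counting the first DFS above sets up: "needed" tracks how many children
# still want a parent's entry in "hist", so each entry can be freed as soon
# as its last consumer has used it, bounding memory usage during annotate.

def consume(hist, needed, p):
    """return hist[p], freeing the entry once nothing else needs it"""
    value = hist[p]
    if needed[p] == 1:
        del hist[p]  # last consumer: drop the cached annotation
        del needed[p]
    else:
        needed[p] -= 1
    return value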
491 def canannotatedirectly(self, rev):
489 def canannotatedirectly(self, rev):
492 """(str) -> bool, fctx or node.
490 """(str) -> bool, fctx or node.
493 return (True, f) if we can annotate without updating the linelog; pass
491 return (True, f) if we can annotate without updating the linelog; pass
494 f to annotatedirectly.
492 f to annotatedirectly.
495 return (False, f) if we need extra calculation. f is the fctx resolved
493 return (False, f) if we need extra calculation. f is the fctx resolved
496 from rev.
494 from rev.
497 """
495 """
498 result = True
496 result = True
499 f = None
497 f = None
500 if not isinstance(rev, int) and rev is not None:
498 if not isinstance(rev, int) and rev is not None:
501 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
499 hsh = {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)
502 if hsh is not None and (hsh, self.path) in self.revmap:
500 if hsh is not None and (hsh, self.path) in self.revmap:
503 f = hsh
501 f = hsh
504 if f is None:
502 if f is None:
505 adjustctx = b'linkrev' if self._perfhack else True
503 adjustctx = b'linkrev' if self._perfhack else True
506 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
504 f = self._resolvefctx(rev, adjustctx=adjustctx, resolverev=True)
507 result = f in self.revmap
505 result = f in self.revmap
508 if not result and self._perfhack:
506 if not result and self._perfhack:
509 # redo the resolution without perfhack - as we are going to
507 # redo the resolution without perfhack - as we are going to
510 # do write operations, we need a correct fctx.
508 # do write operations, we need a correct fctx.
511 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
509 f = self._resolvefctx(rev, adjustctx=True, resolverev=True)
512 return result, f
510 return result, f
513
511
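# An illustrative sketch (not part of the patch) of the length-based
# dispatch in canannotatedirectly above: a 20-byte value is already a
# binary node, a 40-byte value is hex and goes through bin(), and any other
# length maps to None so the caller falls back to resolving a fctx.
# mercurial.node.bin is essentially binascii.unhexlify, used as a stand-in.

from binascii import unhexlify as bin

def tonode(rev):
    return {20: bytes, 40: bin}.get(len(rev), lambda x: None)(rev)

# tonode(b'\x00' * 20) == b'\x00' * 20   (already binary, passed through)
# tonode(b'00' * 20) == b'\x00' * 20     (hex, converted)
# tonode(b'tip') is None                 (not a usable hash)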
514 def annotatealllines(self, rev, showpath=False, showlines=False):
512 def annotatealllines(self, rev, showpath=False, showlines=False):
515 """(rev : str) -> [(node : str, linenum : int, path : str)]
513 """(rev : str) -> [(node : str, linenum : int, path : str)]
516
514
517 the result has the same format as annotate, but includes all (including
515 the result has the same format as annotate, but includes all (including
518 deleted) lines up to rev. call this after calling annotate(rev, ...) for
516 deleted) lines up to rev. call this after calling annotate(rev, ...) for
519 better performance and accuracy.
517 better performance and accuracy.
520 """
518 """
521 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
519 revfctx = self._resolvefctx(rev, resolverev=True, adjustctx=True)
522
520
523 # find a chain from rev to anything in the mainbranch
521 # find a chain from rev to anything in the mainbranch
524 if revfctx not in self.revmap:
522 if revfctx not in self.revmap:
525 chain = [revfctx]
523 chain = [revfctx]
526 a = b''
524 a = b''
527 while True:
525 while True:
528 f = chain[-1]
526 f = chain[-1]
529 pl = self._parentfunc(f)
527 pl = self._parentfunc(f)
530 if not pl:
528 if not pl:
531 break
529 break
532 if pl[0] in self.revmap:
530 if pl[0] in self.revmap:
533 a = pl[0].data()
531 a = pl[0].data()
534 break
532 break
535 chain.append(pl[0])
533 chain.append(pl[0])
536
534
537 # both self.linelog and self.revmap are backed by the filesystem. now
535 # both self.linelog and self.revmap are backed by the filesystem. now
538 # we want to modify them but do not want to write changes back to
536 # we want to modify them but do not want to write changes back to
539 # files. so we create in-memory objects and copy them. it's like
537 # files. so we create in-memory objects and copy them. it's like
540 # a "fork".
538 # a "fork".
541 linelog = linelogmod.linelog()
539 linelog = linelogmod.linelog()
542 linelog.copyfrom(self.linelog)
540 linelog.copyfrom(self.linelog)
543 linelog.annotate(linelog.maxrev)
541 linelog.annotate(linelog.maxrev)
544 revmap = revmapmod.revmap()
542 revmap = revmapmod.revmap()
545 revmap.copyfrom(self.revmap)
543 revmap.copyfrom(self.revmap)
546
544
547 for f in reversed(chain):
545 for f in reversed(chain):
548 b = f.data()
546 b = f.data()
549 blocks = list(self._diffblocks(a, b))
547 blocks = list(self._diffblocks(a, b))
550 self._doappendrev(linelog, revmap, f, blocks)
548 self._doappendrev(linelog, revmap, f, blocks)
551 a = b
549 a = b
552 else:
550 else:
553 # fastpath: use existing linelog, revmap as we don't write to them
551 # fastpath: use existing linelog, revmap as we don't write to them
554 linelog = self.linelog
552 linelog = self.linelog
555 revmap = self.revmap
553 revmap = self.revmap
556
554
557 lines = linelog.getalllines()
555 lines = linelog.getalllines()
558 hsh = revfctx.node()
556 hsh = revfctx.node()
559 llrev = revmap.hsh2rev(hsh)
557 llrev = revmap.hsh2rev(hsh)
560 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
558 result = [(revmap.rev2hsh(r), l) for r, l in lines if r <= llrev]
561 # cannot use _refineannotateresult since we need custom logic for
559 # cannot use _refineannotateresult since we need custom logic for
562 # resolving line contents
560 # resolving line contents
563 if showpath:
561 if showpath:
564 result = self._addpathtoresult(result, revmap)
562 result = self._addpathtoresult(result, revmap)
565 if showlines:
563 if showlines:
566 linecontents = self._resolvelines(result, revmap, linelog)
564 linecontents = self._resolvelines(result, revmap, linelog)
567 result = (result, linecontents)
565 result = (result, linecontents)
568 return result
566 return result
569
567
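# A hedged sketch (not part of the patch) of the "fork" strategy used by
# annotatealllines above: when rev sits on a side branch, the persistent
# linelog/revmap are copied into throwaway in-memory objects and the side
# branch is replayed oldest-first, so nothing is ever written back to disk.
# The imports mirror what this module appears to use; diffblocks stands in
# for self._diffblocks.

from mercurial import linelog as linelogmod
from hgext.fastannotate import revmap as revmapmod

def fork_and_replay(actx, chain, basetext, diffblocks):
    linelog = linelogmod.linelog()
    linelog.copyfrom(actx.linelog)  # in-memory copy of the cache
    revmap = revmapmod.revmap()
    revmap.copyfrom(actx.revmap)
    a = basetext
    for f in reversed(chain):  # chain is newest-first, so replay reversed
        b = f.data()
        actx._doappendrev(linelog, revmap, f, list(diffblocks(a, b)))
        a = b
    return linelog, revmap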
570 def _resolvelines(self, annotateresult, revmap, linelog):
568 def _resolvelines(self, annotateresult, revmap, linelog):
571 """(annotateresult) -> [line]. designed for annotatealllines.
569 """(annotateresult) -> [line]. designed for annotatealllines.
572 this is probably the most inefficient code in the whole fastannotate
570 this is probably the most inefficient code in the whole fastannotate
573 directory. but we have made a decision that the linelog does not
571 directory. but we have made a decision that the linelog does not
574 store line contents. so getting them requires random accesses to
572 store line contents. so getting them requires random accesses to
575 the revlog data; since there can be many of them, it can be very slow.
573 the revlog data; since there can be many of them, it can be very slow.
576 """
574 """
577 # [llrev]
575 # [llrev]
578 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
576 revs = [revmap.hsh2rev(l[0]) for l in annotateresult]
579 result = [None] * len(annotateresult)
577 result = [None] * len(annotateresult)
580 # {(rev, linenum): [lineindex]}
578 # {(rev, linenum): [lineindex]}
581 key2idxs = collections.defaultdict(list)
579 key2idxs = collections.defaultdict(list)
582 for i in range(len(result)):
580 for i in range(len(result)):
583 key2idxs[(revs[i], annotateresult[i][1])].append(i)
581 key2idxs[(revs[i], annotateresult[i][1])].append(i)
584 while key2idxs:
582 while key2idxs:
585 # find an unresolved line and its linelog rev to annotate
583 # find an unresolved line and its linelog rev to annotate
586 hsh = None
584 hsh = None
587 try:
585 try:
588 for (rev, _linenum), idxs in key2idxs.items():
586 for (rev, _linenum), idxs in key2idxs.items():
589 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
587 if revmap.rev2flag(rev) & revmapmod.sidebranchflag:
590 continue
588 continue
591 hsh = annotateresult[idxs[0]][0]
589 hsh = annotateresult[idxs[0]][0]
592 break
590 break
593 except StopIteration: # no more unresolved lines
591 except StopIteration: # no more unresolved lines
594 return result
592 return result
595 if hsh is None:
593 if hsh is None:
596 # the remaining key2idxs are not in the main branch; resolve them
594 # the remaining key2idxs are not in the main branch; resolve them
597 # the hard way...
595 # the hard way...
598 revlines = {}
596 revlines = {}
599 for (rev, linenum), idxs in key2idxs.items():
597 for (rev, linenum), idxs in key2idxs.items():
600 if rev not in revlines:
598 if rev not in revlines:
601 hsh = annotateresult[idxs[0]][0]
599 hsh = annotateresult[idxs[0]][0]
602 if self.ui.debugflag:
600 if self.ui.debugflag:
603 self.ui.debug(
601 self.ui.debug(
604 b'fastannotate: reading %s line #%d '
602 b'fastannotate: reading %s line #%d '
605 b'to resolve lines %r\n'
603 b'to resolve lines %r\n'
606 % (short(hsh), linenum, idxs)
604 % (short(hsh), linenum, idxs)
607 )
605 )
608 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
606 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
609 lines = mdiff.splitnewlines(fctx.data())
607 lines = mdiff.splitnewlines(fctx.data())
610 revlines[rev] = lines
608 revlines[rev] = lines
611 for idx in idxs:
609 for idx in idxs:
612 result[idx] = revlines[rev][linenum]
610 result[idx] = revlines[rev][linenum]
613 assert all(x is not None for x in result)
611 assert all(x is not None for x in result)
614 return result
612 return result
615
613
616 # run the annotate and the lines should match the file content
614 # run the annotate and the lines should match the file content
617 self.ui.debug(
615 self.ui.debug(
618 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
616 b'fastannotate: annotate %s to resolve lines\n' % short(hsh)
619 )
617 )
620 linelog.annotate(rev)
618 linelog.annotate(rev)
621 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
619 fctx = self._resolvefctx(hsh, revmap.rev2path(rev))
622 annotated = linelog.annotateresult
620 annotated = linelog.annotateresult
623 lines = mdiff.splitnewlines(fctx.data())
621 lines = mdiff.splitnewlines(fctx.data())
624 if len(lines) != len(annotated):
622 if len(lines) != len(annotated):
625 raise faerror.CorruptedFileError(b'unexpected annotated lines')
623 raise faerror.CorruptedFileError(b'unexpected annotated lines')
626 # resolve lines from the annotate result
624 # resolve lines from the annotate result
627 for i, line in enumerate(lines):
625 for i, line in enumerate(lines):
628 k = annotated[i]
626 k = annotated[i]
629 if k in key2idxs:
627 if k in key2idxs:
630 for idx in key2idxs[k]:
628 for idx in key2idxs[k]:
631 result[idx] = line
629 result[idx] = line
632 del key2idxs[k]
630 del key2idxs[k]
633 return result
631 return result
634
632
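# An illustrative sketch (not part of the patch) of the batching above:
# result slots are grouped by (linelog rev, line number) so each revision's
# file content only has to be read and split once, however many annotated
# lines cite that revision.

import collections

def groupslots(revs, annotateresult):
    key2idxs = collections.defaultdict(list)
    for i, (hsh, linenum) in enumerate(annotateresult):
        key2idxs[(revs[i], linenum)].append(i)
    return key2idxs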
635 def annotatedirectly(self, f, showpath, showlines):
633 def annotatedirectly(self, f, showpath, showlines):
636 """like annotate, but when we know that f is in linelog.
634 """like annotate, but when we know that f is in linelog.
637 f can be either a 20-char str (node) or a fctx. this is for perf - in
635 f can be either a 20-char str (node) or a fctx. this is for perf - in
638 the best case, the user provides a node and we don't need to read the
636 the best case, the user provides a node and we don't need to read the
639 filelog or construct any filecontext.
637 filelog or construct any filecontext.
640 """
638 """
641 if isinstance(f, bytes):
639 if isinstance(f, bytes):
642 hsh = f
640 hsh = f
643 else:
641 else:
644 hsh = f.node()
642 hsh = f.node()
645 llrev = self.revmap.hsh2rev(hsh)
643 llrev = self.revmap.hsh2rev(hsh)
646 if not llrev:
644 if not llrev:
647 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
645 raise faerror.CorruptedFileError(b'%s is not in revmap' % hex(hsh))
648 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
646 if (self.revmap.rev2flag(llrev) & revmapmod.sidebranchflag) != 0:
649 raise faerror.CorruptedFileError(
647 raise faerror.CorruptedFileError(
650 b'%s is not in revmap mainbranch' % hex(hsh)
648 b'%s is not in revmap mainbranch' % hex(hsh)
651 )
649 )
652 self.linelog.annotate(llrev)
650 self.linelog.annotate(llrev)
653 result = [
651 result = [
654 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
652 (self.revmap.rev2hsh(r), l) for r, l in self.linelog.annotateresult
655 ]
653 ]
656 return self._refineannotateresult(result, f, showpath, showlines)
654 return self._refineannotateresult(result, f, showpath, showlines)
657
655
658 def _refineannotateresult(self, result, f, showpath, showlines):
656 def _refineannotateresult(self, result, f, showpath, showlines):
659 """add the missing path or line contents, they can be expensive.
657 """add the missing path or line contents, they can be expensive.
660 f could be either node or fctx.
658 f could be either node or fctx.
661 """
659 """
662 if showpath:
660 if showpath:
663 result = self._addpathtoresult(result)
661 result = self._addpathtoresult(result)
664 if showlines:
662 if showlines:
665 if isinstance(f, bytes): # f: node or fctx
663 if isinstance(f, bytes): # f: node or fctx
666 llrev = self.revmap.hsh2rev(f)
664 llrev = self.revmap.hsh2rev(f)
667 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
665 fctx = self._resolvefctx(f, self.revmap.rev2path(llrev))
668 else:
666 else:
669 fctx = f
667 fctx = f
670 lines = mdiff.splitnewlines(fctx.data())
668 lines = mdiff.splitnewlines(fctx.data())
671 if len(lines) != len(result): # linelog is probably corrupted
669 if len(lines) != len(result): # linelog is probably corrupted
672 raise faerror.CorruptedFileError()
670 raise faerror.CorruptedFileError()
673 result = (result, lines)
671 result = (result, lines)
674 return result
672 return result
675
673
676 def _appendrev(self, fctx, blocks, bannotated=None):
674 def _appendrev(self, fctx, blocks, bannotated=None):
677 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
675 self._doappendrev(self.linelog, self.revmap, fctx, blocks, bannotated)
678
676
679 def _diffblocks(self, a, b):
677 def _diffblocks(self, a, b):
680 return mdiff.allblocks(a, b, self.opts.diffopts)
678 return mdiff.allblocks(a, b, self.opts.diffopts)
681
679
682 @staticmethod
680 @staticmethod
683 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
681 def _doappendrev(linelog, revmap, fctx, blocks, bannotated=None):
684 """append a revision to linelog and revmap"""
682 """append a revision to linelog and revmap"""
685
683
686 def getllrev(f):
684 def getllrev(f):
687 """(fctx) -> int"""
685 """(fctx) -> int"""
688 # f should not be a linelog revision
686 # f should not be a linelog revision
689 if isinstance(f, int):
687 if isinstance(f, int):
690 raise error.ProgrammingError(b'f should not be an int')
688 raise error.ProgrammingError(b'f should not be an int')
691 # f is a fctx, allocate linelog rev on demand
689 # f is a fctx, allocate linelog rev on demand
692 hsh = f.node()
690 hsh = f.node()
693 rev = revmap.hsh2rev(hsh)
691 rev = revmap.hsh2rev(hsh)
694 if rev is None:
692 if rev is None:
695 rev = revmap.append(hsh, sidebranch=True, path=f.path())
693 rev = revmap.append(hsh, sidebranch=True, path=f.path())
696 return rev
694 return rev
697
695
698 # append sidebranch revisions to revmap
696 # append sidebranch revisions to revmap
699 siderevs = []
697 siderevs = []
700 siderevmap = {} # node: int
698 siderevmap = {} # node: int
701 if bannotated is not None:
699 if bannotated is not None:
702 for (a1, a2, b1, b2), op in blocks:
700 for (a1, a2, b1, b2), op in blocks:
703 if op != b'=':
701 if op != b'=':
704 # f could be either a linelog rev, or a fctx.
702 # f could be either a linelog rev, or a fctx.
705 siderevs += [
703 siderevs += [
706 f
704 f
707 for f, l in bannotated[b1:b2]
705 for f, l in bannotated[b1:b2]
708 if not isinstance(f, int)
706 if not isinstance(f, int)
709 ]
707 ]
710 siderevs = set(siderevs)
708 siderevs = set(siderevs)
711 if fctx in siderevs: # mainnode must be appended separately
709 if fctx in siderevs: # mainnode must be appended separately
712 siderevs.remove(fctx)
710 siderevs.remove(fctx)
713 for f in siderevs:
711 for f in siderevs:
714 siderevmap[f] = getllrev(f)
712 siderevmap[f] = getllrev(f)
715
713
716 # the changeset in the main branch, could be a merge
714 # the changeset in the main branch, could be a merge
717 llrev = revmap.append(fctx.node(), path=fctx.path())
715 llrev = revmap.append(fctx.node(), path=fctx.path())
718 siderevmap[fctx] = llrev
716 siderevmap[fctx] = llrev
719
717
720 for (a1, a2, b1, b2), op in reversed(blocks):
718 for (a1, a2, b1, b2), op in reversed(blocks):
721 if op == b'=':
719 if op == b'=':
722 continue
720 continue
723 if bannotated is None:
721 if bannotated is None:
724 linelog.replacelines(llrev, a1, a2, b1, b2)
722 linelog.replacelines(llrev, a1, a2, b1, b2)
725 else:
723 else:
726 blines = [
724 blines = [
727 ((r if isinstance(r, int) else siderevmap[r]), l)
725 ((r if isinstance(r, int) else siderevmap[r]), l)
728 for r, l in bannotated[b1:b2]
726 for r, l in bannotated[b1:b2]
729 ]
727 ]
730 linelog.replacelines_vec(llrev, a1, a2, blines)
728 linelog.replacelines_vec(llrev, a1, a2, blines)
731
729
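# A minimal sketch (not part of the patch) of why the loop above walks
# blocks in reverse: applying hunks bottom-up keeps the a1/a2 offsets of
# the hunks not yet applied valid, because an edit at a1 never shifts the
# lines above it. Plain lists stand in for the linelog here.

def applyblocks(oldlines, newlines, blocks):
    result = list(oldlines)
    for (a1, a2, b1, b2), op in reversed(blocks):
        if op == b'=':
            continue  # unchanged region, nothing to replace
        result[a1:a2] = newlines[b1:b2]  # indices below a1 are untouched
    return result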
732 def _addpathtoresult(self, annotateresult, revmap=None):
730 def _addpathtoresult(self, annotateresult, revmap=None):
733 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
731 """(revmap, [(node, linenum)]) -> [(node, linenum, path)]"""
734 if revmap is None:
732 if revmap is None:
735 revmap = self.revmap
733 revmap = self.revmap
736
734
737 def _getpath(nodeid):
735 def _getpath(nodeid):
738 path = self._node2path.get(nodeid)
736 path = self._node2path.get(nodeid)
739 if path is None:
737 if path is None:
740 path = revmap.rev2path(revmap.hsh2rev(nodeid))
738 path = revmap.rev2path(revmap.hsh2rev(nodeid))
741 self._node2path[nodeid] = path
739 self._node2path[nodeid] = path
742 return path
740 return path
743
741
744 return [(n, l, _getpath(n)) for n, l in annotateresult]
742 return [(n, l, _getpath(n)) for n, l in annotateresult]
745
743
746 def _checklastmasterhead(self, fctx):
744 def _checklastmasterhead(self, fctx):
747 """check if fctx is the master's head last time, raise if not"""
745 """check if fctx is the master's head last time, raise if not"""
748 if fctx is None:
746 if fctx is None:
749 llrev = 0
747 llrev = 0
750 else:
748 else:
751 llrev = self.revmap.hsh2rev(fctx.node())
749 llrev = self.revmap.hsh2rev(fctx.node())
752 if not llrev:
750 if not llrev:
753 raise faerror.CannotReuseError()
751 raise faerror.CannotReuseError()
754 if self.linelog.maxrev != llrev:
752 if self.linelog.maxrev != llrev:
755 raise faerror.CannotReuseError()
753 raise faerror.CannotReuseError()
756
754
757 @util.propertycache
755 @util.propertycache
758 def _parentfunc(self):
756 def _parentfunc(self):
759 """-> (fctx) -> [fctx]"""
757 """-> (fctx) -> [fctx]"""
760 followrename = self.opts.followrename
758 followrename = self.opts.followrename
761 followmerge = self.opts.followmerge
759 followmerge = self.opts.followmerge
762
760
763 def parents(f):
761 def parents(f):
764 pl = _parents(f, follow=followrename)
762 pl = _parents(f, follow=followrename)
765 if not followmerge:
763 if not followmerge:
766 pl = pl[:1]
764 pl = pl[:1]
767 return pl
765 return pl
768
766
769 return parents
767 return parents
770
768
771 @util.propertycache
769 @util.propertycache
772 def _perfhack(self):
770 def _perfhack(self):
773 return self.ui.configbool(b'fastannotate', b'perfhack')
771 return self.ui.configbool(b'fastannotate', b'perfhack')
774
772
775 def _resolvefctx(self, rev, path=None, **kwds):
773 def _resolvefctx(self, rev, path=None, **kwds):
776 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
774 return resolvefctx(self.repo, rev, (path or self.path), **kwds)
777
775
778
776
779 def _unlinkpaths(paths):
777 def _unlinkpaths(paths):
780 """silent, best-effort unlink"""
778 """silent, best-effort unlink"""
781 for path in paths:
779 for path in paths:
782 try:
780 try:
783 util.unlink(path)
781 util.unlink(path)
784 except OSError:
782 except OSError:
785 pass
783 pass
786
784
787
785
788 class pathhelper:
786 class pathhelper:
789 """helper for getting paths for lockfile, linelog and revmap"""
787 """helper for getting paths for lockfile, linelog and revmap"""
790
788
791 def __init__(self, repo, path, opts=defaultopts):
789 def __init__(self, repo, path, opts=defaultopts):
792 # different options use different directories
790 # different options use different directories
793 self._vfspath = os.path.join(
791 self._vfspath = os.path.join(
794 b'fastannotate', opts.shortstr, encodedir(path)
792 b'fastannotate', opts.shortstr, encodedir(path)
795 )
793 )
796 self._repo = repo
794 self._repo = repo
797
795
798 @property
796 @property
799 def dirname(self):
797 def dirname(self):
800 return os.path.dirname(self._repo.vfs.join(self._vfspath))
798 return os.path.dirname(self._repo.vfs.join(self._vfspath))
801
799
802 @property
800 @property
803 def linelogpath(self):
801 def linelogpath(self):
804 return self._repo.vfs.join(self._vfspath + b'.l')
802 return self._repo.vfs.join(self._vfspath + b'.l')
805
803
806 def lock(self):
804 def lock(self):
807 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
805 return lockmod.lock(self._repo.vfs, self._vfspath + b'.lock')
808
806
809 @property
807 @property
810 def revmappath(self):
808 def revmappath(self):
811 return self._repo.vfs.join(self._vfspath + b'.m')
809 return self._repo.vfs.join(self._vfspath + b'.m')
812
810
813
811
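# An illustrative sketch (not part of the patch) of the cache layout
# pathhelper builds: all three files for one tracked path live side by side
# under a per-options directory inside the repository's .hg vfs, differing
# only in suffix. shortstr and encodedpath are hypothetical example inputs.

import os

def cachepaths(shortstr, encodedpath):
    vfspath = os.path.join(b'fastannotate', shortstr, encodedpath)
    return {
        b'linelog': vfspath + b'.l',
        b'revmap': vfspath + b'.m',
        b'lock': vfspath + b'.lock',
    }

# cachepaths(b'default', b'a/b.py') maps, under .hg/, to
# fastannotate/default/a/b.py.l, .m, and .lock respectively.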
814 @contextlib.contextmanager
812 @contextlib.contextmanager
815 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
813 def annotatecontext(repo, path, opts=defaultopts, rebuild=False):
816 """context needed to perform (fast) annotate on a file
814 """context needed to perform (fast) annotate on a file
817
815
818 an annotatecontext of a single file consists of two structures: the
816 an annotatecontext of a single file consists of two structures: the
819 linelog and the revmap. this function takes care of locking. only 1
817 linelog and the revmap. this function takes care of locking. only 1
820 process is allowed to write that file's linelog and revmap at a time.
818 process is allowed to write that file's linelog and revmap at a time.
821
819
822 when something goes wrong, this function will assume the linelog and the
820 when something goes wrong, this function will assume the linelog and the
823 revmap are in a bad state, and remove them from disk.
821 revmap are in a bad state, and remove them from disk.
824
822
825 use this function in the following way:
823 use this function in the following way:
826
824
827 with annotatecontext(...) as actx:
825 with annotatecontext(...) as actx:
828 actx. ....
826 actx. ....
829 """
827 """
830 helper = pathhelper(repo, path, opts)
828 helper = pathhelper(repo, path, opts)
831 util.makedirs(helper.dirname)
829 util.makedirs(helper.dirname)
832 revmappath = helper.revmappath
830 revmappath = helper.revmappath
833 linelogpath = helper.linelogpath
831 linelogpath = helper.linelogpath
834 actx = None
832 actx = None
835 try:
833 try:
836 with helper.lock():
834 with helper.lock():
837 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
835 actx = _annotatecontext(repo, path, linelogpath, revmappath, opts)
838 if rebuild:
836 if rebuild:
839 actx.rebuild()
837 actx.rebuild()
840 yield actx
838 yield actx
841 except Exception:
839 except Exception:
842 if actx is not None:
840 if actx is not None:
843 actx.rebuild()
841 actx.rebuild()
844 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
842 repo.ui.debug(b'fastannotate: %s: cache broken and deleted\n' % path)
845 raise
843 raise
846 finally:
844 finally:
847 if actx is not None:
845 if actx is not None:
848 actx.close()
846 actx.close()
849
847
850
848
851 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
849 def fctxannotatecontext(fctx, follow=True, diffopts=None, rebuild=False):
852 """like annotatecontext but get the context from a fctx. convenient when
850 """like annotatecontext but get the context from a fctx. convenient when
853 used in fctx.annotate
851 used in fctx.annotate
854 """
852 """
855 repo = fctx._repo
853 repo = fctx._repo
856 path = fctx._path
854 path = fctx._path
857 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
855 if repo.ui.configbool(b'fastannotate', b'forcefollow', True):
858 follow = True
856 follow = True
859 aopts = annotateopts(diffopts=diffopts, followrename=follow)
857 aopts = annotateopts(diffopts=diffopts, followrename=follow)
860 return annotatecontext(repo, path, aopts, rebuild)
858 return annotatecontext(repo, path, aopts, rebuild)
@@ -1,136 +1,135 b''
1 # Copyright 2016-present Facebook. All Rights Reserved.
1 # Copyright 2016-present Facebook. All Rights Reserved.
2 #
2 #
3 # support: fastannotate support for hgweb, and filectx
3 # support: fastannotate support for hgweb, and filectx
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 from mercurial.pycompat import getattr
10 from mercurial import (
9 from mercurial import (
11 context as hgcontext,
10 context as hgcontext,
12 dagop,
11 dagop,
13 extensions,
12 extensions,
14 hgweb,
13 hgweb,
15 patch,
14 patch,
16 util,
15 util,
17 )
16 )
18
17
19 from . import (
18 from . import (
20 context,
19 context,
21 revmap,
20 revmap,
22 )
21 )
23
22
24
23
25 class _lazyfctx:
24 class _lazyfctx:
26 """delegates to fctx but do not construct fctx when unnecessary"""
25 """delegates to fctx but do not construct fctx when unnecessary"""
27
26
28 def __init__(self, repo, node, path):
27 def __init__(self, repo, node, path):
29 self._node = node
28 self._node = node
30 self._path = path
29 self._path = path
31 self._repo = repo
30 self._repo = repo
32
31
33 def node(self):
32 def node(self):
34 return self._node
33 return self._node
35
34
36 def path(self):
35 def path(self):
37 return self._path
36 return self._path
38
37
39 @util.propertycache
38 @util.propertycache
40 def _fctx(self):
39 def _fctx(self):
41 return context.resolvefctx(self._repo, self._node, self._path)
40 return context.resolvefctx(self._repo, self._node, self._path)
42
41
43 def __getattr__(self, name):
42 def __getattr__(self, name):
44 return getattr(self._fctx, name)
43 return getattr(self._fctx, name)
45
44
46
45
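# A generic sketch (not part of the patch) of the lazy-proxy pattern
# _lazyfctx uses above: the expensive object is only built on first
# delegated access, then cached on the instance. functools.cached_property
# plays the role of util.propertycache here; lazyproxy is a hypothetical
# stand-in class.

import functools

class lazyproxy:
    def __init__(self, factory):
        self._factory = factory

    @functools.cached_property
    def _target(self):
        return self._factory()  # constructed once, on first use

    def __getattr__(self, name):
        # only called for attributes not found normally, so it never
        # shadows _factory or the cached _target
        return getattr(self._target, name)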
47 def _convertoutputs(repo, annotated, contents):
46 def _convertoutputs(repo, annotated, contents):
48 """convert fastannotate outputs to vanilla annotate format"""
47 """convert fastannotate outputs to vanilla annotate format"""
49 # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
48 # fastannotate returns: [(nodeid, linenum, path)], [linecontent]
50 # convert to what fctx.annotate returns: [annotateline]
49 # convert to what fctx.annotate returns: [annotateline]
51 results = []
50 results = []
52 fctxmap = {}
51 fctxmap = {}
53 annotateline = dagop.annotateline
52 annotateline = dagop.annotateline
54 for i, (hsh, linenum, path) in enumerate(annotated):
53 for i, (hsh, linenum, path) in enumerate(annotated):
55 if (hsh, path) not in fctxmap:
54 if (hsh, path) not in fctxmap:
56 fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
55 fctxmap[(hsh, path)] = _lazyfctx(repo, hsh, path)
57 # linenum: the user wants 1-based, we have 0-based.
56 # linenum: the user wants 1-based, we have 0-based.
58 lineno = linenum + 1
57 lineno = linenum + 1
59 fctx = fctxmap[(hsh, path)]
58 fctx = fctxmap[(hsh, path)]
60 line = contents[i]
59 line = contents[i]
61 results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
60 results.append(annotateline(fctx=fctx, lineno=lineno, text=line))
62 return results
61 return results
63
62
64
63
65 def _getmaster(fctx):
64 def _getmaster(fctx):
66 """(fctx) -> str"""
65 """(fctx) -> str"""
67 return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
66 return fctx._repo.ui.config(b'fastannotate', b'mainbranch') or b'default'
68
67
69
68
70 def _doannotate(fctx, follow=True, diffopts=None):
69 def _doannotate(fctx, follow=True, diffopts=None):
71 """like the vanilla fctx.annotate, but do it via fastannotate, and make
70 """like the vanilla fctx.annotate, but do it via fastannotate, and make
72 the output format compatible with the vanilla fctx.annotate.
71 the output format compatible with the vanilla fctx.annotate.
73 may raise Exception, and always returns line numbers.
72 may raise Exception, and always returns line numbers.
74 """
73 """
75 master = _getmaster(fctx)
74 master = _getmaster(fctx)
76
75
77 with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
76 with context.fctxannotatecontext(fctx, follow, diffopts) as ac:
78 try:
77 try:
79 annotated, contents = ac.annotate(
78 annotated, contents = ac.annotate(
80 fctx.rev(), master=master, showpath=True, showlines=True
79 fctx.rev(), master=master, showpath=True, showlines=True
81 )
80 )
82 except Exception:
81 except Exception:
83 ac.rebuild() # try rebuild once
82 ac.rebuild() # try rebuild once
84 fctx._repo.ui.debug(
83 fctx._repo.ui.debug(
85 b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
84 b'fastannotate: %s: rebuilding broken cache\n' % fctx._path
86 )
85 )
87 try:
86 try:
88 annotated, contents = ac.annotate(
87 annotated, contents = ac.annotate(
89 fctx.rev(), master=master, showpath=True, showlines=True
88 fctx.rev(), master=master, showpath=True, showlines=True
90 )
89 )
91 except Exception:
90 except Exception:
92 raise
91 raise
93
92
94 assert annotated and contents
93 assert annotated and contents
95 return _convertoutputs(fctx._repo, annotated, contents)
94 return _convertoutputs(fctx._repo, annotated, contents)
96
95
97
96
98 def _hgwebannotate(orig, fctx, ui):
97 def _hgwebannotate(orig, fctx, ui):
99 diffopts = patch.difffeatureopts(
98 diffopts = patch.difffeatureopts(
100 ui, untrusted=True, section=b'annotate', whitespace=True
99 ui, untrusted=True, section=b'annotate', whitespace=True
101 )
100 )
102 return _doannotate(fctx, diffopts=diffopts)
101 return _doannotate(fctx, diffopts=diffopts)
103
102
104
103
105 def _fctxannotate(
104 def _fctxannotate(
106 orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
105 orig, self, follow=False, linenumber=False, skiprevs=None, diffopts=None
107 ):
106 ):
108 if skiprevs:
107 if skiprevs:
109 # skiprevs is not supported yet
108 # skiprevs is not supported yet
110 return orig(
109 return orig(
111 self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
110 self, follow, linenumber, skiprevs=skiprevs, diffopts=diffopts
112 )
111 )
113 try:
112 try:
114 return _doannotate(self, follow, diffopts)
113 return _doannotate(self, follow, diffopts)
115 except Exception as ex:
114 except Exception as ex:
116 self._repo.ui.debug(
115 self._repo.ui.debug(
117 b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
116 b'fastannotate: falling back to the vanilla annotate: %r\n' % ex
118 )
117 )
119 return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
118 return orig(self, follow=follow, skiprevs=skiprevs, diffopts=diffopts)
120
119
121
120
122 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
121 def _remotefctxannotate(orig, self, follow=False, skiprevs=None, diffopts=None):
123 # skipset: a set-like used to test if a fctx needs to be downloaded
122 # skipset: a set-like used to test if a fctx needs to be downloaded
124 with context.fctxannotatecontext(self, follow, diffopts) as ac:
123 with context.fctxannotatecontext(self, follow, diffopts) as ac:
125 skipset = revmap.revmap(ac.revmappath)
124 skipset = revmap.revmap(ac.revmappath)
126 return orig(
125 return orig(
127 self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
126 self, follow, skiprevs=skiprevs, diffopts=diffopts, prefetchskip=skipset
128 )
127 )
129
128
130
129
131 def replacehgwebannotate():
130 def replacehgwebannotate():
132 extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
131 extensions.wrapfunction(hgweb.webutil, 'annotate', _hgwebannotate)
133
132
134
133
135 def replacefctxannotate():
134 def replacefctxannotate():
136 extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
135 extensions.wrapfunction(hgcontext.basefilectx, 'annotate', _fctxannotate)
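# A hedged sketch (not part of the patch) of the extensions.wrapfunction
# contract relied on above: the wrapper receives the original function as
# its first argument, letting it try a fast path and fall back cleanly.
# somemodule, slowfunc, and fastpath are hypothetical names.

def fastpath(*args, **kwargs):
    raise NotImplementedError  # stand-in for the fastannotate code path

def wrapped(orig, *args, **kwargs):
    try:
        return fastpath(*args, **kwargs)
    except Exception:
        return orig(*args, **kwargs)  # vanilla behavior as the fallback

# extensions.wrapfunction(somemodule, 'slowfunc', wrapped)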
@@ -1,2682 +1,2681 b''
1 # histedit.py - interactive history editing for mercurial
1 # histedit.py - interactive history editing for mercurial
2 #
2 #
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """interactive history editing
7 """interactive history editing
8
8
9 With this extension installed, Mercurial gains one new command: histedit. Usage
9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 is as follows, assuming the following history::
10 is as follows, assuming the following history::
11
11
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 | Add delta
13 | Add delta
14 |
14 |
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 | Add gamma
16 | Add gamma
17 |
17 |
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 | Add beta
19 | Add beta
20 |
20 |
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 Add alpha
22 Add alpha
23
23
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 file open in your editor::
25 file open in your editor::
26
26
27 pick c561b4e977df Add beta
27 pick c561b4e977df Add beta
28 pick 030b686bedc4 Add gamma
28 pick 030b686bedc4 Add gamma
29 pick 7c2fd3b9020c Add delta
29 pick 7c2fd3b9020c Add delta
30
30
31 # Edit history between c561b4e977df and 7c2fd3b9020c
31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 #
32 #
33 # Commits are listed from least to most recent
33 # Commits are listed from least to most recent
34 #
34 #
35 # Commands:
35 # Commands:
36 # p, pick = use commit
36 # p, pick = use commit
37 # e, edit = use commit, but allow edits before making new commit
37 # e, edit = use commit, but allow edits before making new commit
38 # f, fold = use commit, but combine it with the one above
38 # f, fold = use commit, but combine it with the one above
39 # r, roll = like fold, but discard this commit's description and date
39 # r, roll = like fold, but discard this commit's description and date
40 # d, drop = remove commit from history
40 # d, drop = remove commit from history
41 # m, mess = edit commit message without changing commit content
41 # m, mess = edit commit message without changing commit content
42 # b, base = checkout changeset and apply further changesets from there
42 # b, base = checkout changeset and apply further changesets from there
43 #
43 #
44
44
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 In this file, lines beginning with ``#`` are ignored. You must specify a rule
46 for each revision in your history. For example, if you had meant to add gamma
46 for each revision in your history. For example, if you had meant to add gamma
47 before beta, and then wanted to add delta in the same revision as beta, you
47 before beta, and then wanted to add delta in the same revision as beta, you
48 would reorganize the file to look like this::
48 would reorganize the file to look like this::
49
49
50 pick 030b686bedc4 Add gamma
50 pick 030b686bedc4 Add gamma
51 pick c561b4e977df Add beta
51 pick c561b4e977df Add beta
52 fold 7c2fd3b9020c Add delta
52 fold 7c2fd3b9020c Add delta
53
53
54 # Edit history between c561b4e977df and 7c2fd3b9020c
54 # Edit history between c561b4e977df and 7c2fd3b9020c
55 #
55 #
56 # Commits are listed from least to most recent
56 # Commits are listed from least to most recent
57 #
57 #
58 # Commands:
58 # Commands:
59 # p, pick = use commit
59 # p, pick = use commit
60 # e, edit = use commit, but allow edits before making new commit
60 # e, edit = use commit, but allow edits before making new commit
61 # f, fold = use commit, but combine it with the one above
61 # f, fold = use commit, but combine it with the one above
62 # r, roll = like fold, but discard this commit's description and date
62 # r, roll = like fold, but discard this commit's description and date
63 # d, drop = remove commit from history
63 # d, drop = remove commit from history
64 # m, mess = edit commit message without changing commit content
64 # m, mess = edit commit message without changing commit content
65 # b, base = checkout changeset and apply further changesets from there
65 # b, base = checkout changeset and apply further changesets from there
66 #
66 #
67
67
68 At which point you close the editor and ``histedit`` starts working. When you
68 At which point you close the editor and ``histedit`` starts working. When you
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
69 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
70 those revisions together, offering you a chance to clean up the commit message::
70 those revisions together, offering you a chance to clean up the commit message::
71
71
72 Add beta
72 Add beta
73 ***
73 ***
74 Add delta
74 Add delta
75
75
76 Edit the commit message to your liking, then close the editor. The date used
76 Edit the commit message to your liking, then close the editor. The date used
77 for the commit will be the later of the two commits' dates. For this example,
77 for the commit will be the later of the two commits' dates. For this example,
78 let's assume that the commit message was changed to ``Add beta and delta.``
78 let's assume that the commit message was changed to ``Add beta and delta.``
79 After histedit has run and had a chance to remove any old or temporary
79 After histedit has run and had a chance to remove any old or temporary
80 revisions it needed, the history looks like this::
80 revisions it needed, the history looks like this::
81
81
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
82 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
83 | Add beta and delta.
83 | Add beta and delta.
84 |
84 |
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
85 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
86 | Add gamma
86 | Add gamma
87 |
87 |
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
88 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
89 Add alpha
89 Add alpha
90
90
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
91 Note that ``histedit`` does *not* remove any revisions (even its own temporary
92 ones) until after it has completed all the editing operations, so it will
92 ones) until after it has completed all the editing operations, so it will
93 probably perform several strip operations when it's done. For the above example,
93 probably perform several strip operations when it's done. For the above example,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
94 it had to run strip twice. Strip can be slow depending on a variety of factors,
95 so you might need to be a little patient. You can choose to keep the original
95 so you might need to be a little patient. You can choose to keep the original
96 revisions by passing the ``--keep`` flag.
96 revisions by passing the ``--keep`` flag.
97
97
98 The ``edit`` operation will drop you back to a command prompt,
98 The ``edit`` operation will drop you back to a command prompt,
99 allowing you to edit files freely, or even use ``hg record`` to commit
99 allowing you to edit files freely, or even use ``hg record`` to commit
100 some changes as a separate commit. When you're done, any remaining
100 some changes as a separate commit. When you're done, any remaining
101 uncommitted changes will be committed as well. When done, run ``hg
101 uncommitted changes will be committed as well. When done, run ``hg
102 histedit --continue`` to finish this step. If there are uncommitted
102 histedit --continue`` to finish this step. If there are uncommitted
103 changes, you'll be prompted for a new commit message, but the default
103 changes, you'll be prompted for a new commit message, but the default
104 commit message will be the original message for the ``edit`` ed
104 commit message will be the original message for the ``edit`` ed
105 revision, and the date of the original commit will be preserved.
105 revision, and the date of the original commit will be preserved.
106
106
107 The ``message`` operation will give you a chance to revise a commit
107 The ``message`` operation will give you a chance to revise a commit
108 message without changing the contents. It's a shortcut for doing
108 message without changing the contents. It's a shortcut for doing
109 ``edit`` immediately followed by `hg histedit --continue``.
109 ``edit`` immediately followed by `hg histedit --continue``.
110
110
111 If ``histedit`` encounters a conflict when moving a revision (while
111 If ``histedit`` encounters a conflict when moving a revision (while
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
112 handling ``pick`` or ``fold``), it'll stop in a similar manner to
113 ``edit`` with the difference that it won't prompt you for a commit
113 ``edit`` with the difference that it won't prompt you for a commit
114 message when done. If you decide at this point that you don't like how
114 message when done. If you decide at this point that you don't like how
115 much work it will be to rearrange history, or that you made a mistake,
115 much work it will be to rearrange history, or that you made a mistake,
116 you can use ``hg histedit --abort`` to abandon the new changes you
116 you can use ``hg histedit --abort`` to abandon the new changes you
117 have made and return to the state before you attempted to edit your
117 have made and return to the state before you attempted to edit your
118 history.
118 history.
119
119
120 If we clone the histedit-ed example repository above and add four more
120 If we clone the histedit-ed example repository above and add four more
121 changes, such that we have the following history::
121 changes, such that we have the following history::
122
122
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
123 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
124 | Add theta
124 | Add theta
125 |
125 |
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
126 o 5 140988835471 2009-04-27 18:04 -0500 stefan
127 | Add eta
127 | Add eta
128 |
128 |
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
129 o 4 122930637314 2009-04-27 18:04 -0500 stefan
130 | Add zeta
130 | Add zeta
131 |
131 |
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
132 o 3 836302820282 2009-04-27 18:04 -0500 stefan
133 | Add epsilon
133 | Add epsilon
134 |
134 |
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
135 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
136 | Add beta and delta.
136 | Add beta and delta.
137 |
137 |
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
138 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
139 | Add gamma
139 | Add gamma
140 |
140 |
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
141 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
142 Add alpha
142 Add alpha
143
143
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
144 If you run ``hg histedit --outgoing`` on the clone then it is the same
145 as running ``hg histedit 836302820282``. If you plan to push to a
145 as running ``hg histedit 836302820282``. If you plan to push to a
146 repository that Mercurial does not detect to be related to the source
146 repository that Mercurial does not detect to be related to the source
147 repo, you can add a ``--force`` option.
147 repo, you can add a ``--force`` option.
148
148
149 Config
149 Config
150 ------
150 ------
151
151
152 Histedit rule lines are truncated to 80 characters by default. You
152 Histedit rule lines are truncated to 80 characters by default. You
153 can customize this behavior by setting a different length in your
153 can customize this behavior by setting a different length in your
154 configuration file::
154 configuration file::
155
155
156 [histedit]
156 [histedit]
157 linelen = 120 # truncate rule lines at 120 characters
157 linelen = 120 # truncate rule lines at 120 characters
158
158
159 The summary of a change can be customized as well::
159 The summary of a change can be customized as well::
160
160
161 [histedit]
161 [histedit]
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
162 summary-template = '{rev} {bookmarks} {desc|firstline}'
163
163
164 The customized summary should be kept short enough that rule lines
164 The customized summary should be kept short enough that rule lines
165 will fit in the configured line length. See above if that requires
165 will fit in the configured line length. See above if that requires
166 customization.
166 customization.
167
167
168 ``hg histedit`` attempts to automatically choose an appropriate base
168 ``hg histedit`` attempts to automatically choose an appropriate base
169 revision to use. To change which base revision is used, define a
169 revision to use. To change which base revision is used, define a
170 revset in your configuration file::
170 revset in your configuration file::
171
171
172 [histedit]
172 [histedit]
173 defaultrev = only(.) & draft()
173 defaultrev = only(.) & draft()
174
174
175 By default each edited revision needs to be present in histedit commands.
175 By default each edited revision needs to be present in histedit commands.
176 To remove a revision you need to use the ``drop`` operation. You can configure
176 To remove a revision you need to use the ``drop`` operation. You can configure
177 the drop to be implicit for missing commits by adding::
177 the drop to be implicit for missing commits by adding::
178
178
179 [histedit]
179 [histedit]
180 dropmissing = True
180 dropmissing = True
181
181
182 By default, histedit will close the transaction after each action. For
182 By default, histedit will close the transaction after each action. For
183 performance purposes, you can configure histedit to use a single transaction
183 performance purposes, you can configure histedit to use a single transaction
184 across the entire histedit. WARNING: This setting introduces a significant risk
184 across the entire histedit. WARNING: This setting introduces a significant risk
185 of losing the work you've done in a histedit if the histedit aborts
185 of losing the work you've done in a histedit if the histedit aborts
186 unexpectedly::
186 unexpectedly::
187
187
188 [histedit]
188 [histedit]
189 singletransaction = True
189 singletransaction = True
190
190
191 """
191 """
192
192
193
193
# chistedit dependencies that are not available everywhere
try:
    import fcntl
    import termios
except ImportError:
    fcntl = None
    termios = None

import binascii
import functools
import os
import pickle
import struct

from mercurial.i18n import _
from mercurial.pycompat import (
    open,
)
from mercurial.node import (
    bin,
    hex,
    short,
)
from mercurial import (
    bundle2,
    cmdutil,
    context,
    copies,
    destutil,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    hg,
    logcmdutil,
    merge as mergemod,
    mergestate as mergestatemod,
    mergeutil,
    obsolete,
    pycompat,
    registrar,
    repair,
    rewriteutil,
    scmutil,
    state as statemod,
    util,
)
from mercurial.utils import (
    dateutil,
    stringutil,
    urlutil,
)
248
247
249 cmdtable = {}
248 cmdtable = {}
250 command = registrar.command(cmdtable)
249 command = registrar.command(cmdtable)
251
250
252 configtable = {}
251 configtable = {}
253 configitem = registrar.configitem(configtable)
252 configitem = registrar.configitem(configtable)
254 configitem(
253 configitem(
255 b'experimental',
254 b'experimental',
256 b'histedit.autoverb',
255 b'histedit.autoverb',
257 default=False,
256 default=False,
258 )
257 )
259 configitem(
258 configitem(
260 b'histedit',
259 b'histedit',
261 b'defaultrev',
260 b'defaultrev',
262 default=None,
261 default=None,
263 )
262 )
264 configitem(
263 configitem(
265 b'histedit',
264 b'histedit',
266 b'dropmissing',
265 b'dropmissing',
267 default=False,
266 default=False,
268 )
267 )
269 configitem(
268 configitem(
270 b'histedit',
269 b'histedit',
271 b'linelen',
270 b'linelen',
272 default=80,
271 default=80,
273 )
272 )
274 configitem(
273 configitem(
275 b'histedit',
274 b'histedit',
276 b'singletransaction',
275 b'singletransaction',
277 default=False,
276 default=False,
278 )
277 )
279 configitem(
278 configitem(
280 b'ui',
279 b'ui',
281 b'interface.histedit',
280 b'interface.histedit',
282 default=None,
281 default=None,
283 )
282 )
284 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
283 configitem(b'histedit', b'summary-template', default=b'{rev} {desc|firstline}')
285 # TODO: Teach the text-based histedit interface to respect this config option
284 # TODO: Teach the text-based histedit interface to respect this config option
286 # before we make it non-experimental.
285 # before we make it non-experimental.
287 configitem(
286 configitem(
288 b'histedit', b'later-commits-first', default=False, experimental=True
287 b'histedit', b'later-commits-first', default=False, experimental=True
289 )
288 )
290
289
291 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
290 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
292 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
291 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
293 # be specifying the version(s) of Mercurial they are tested with, or
292 # be specifying the version(s) of Mercurial they are tested with, or
294 # leave the attribute unspecified.
293 # leave the attribute unspecified.
295 testedwith = b'ships-with-hg-core'
294 testedwith = b'ships-with-hg-core'
296
295
297 actiontable = {}
296 actiontable = {}
298 primaryactions = set()
297 primaryactions = set()
299 secondaryactions = set()
298 secondaryactions = set()
300 tertiaryactions = set()
299 tertiaryactions = set()
301 internalactions = set()
300 internalactions = set()
302
301
303
302
304 def geteditcomment(ui, first, last):
303 def geteditcomment(ui, first, last):
305 """construct the editor comment
304 """construct the editor comment
306 The comment includes::
305 The comment includes::
307 - an intro
306 - an intro
308 - sorted primary commands
307 - sorted primary commands
309 - sorted short commands
308 - sorted short commands
310 - sorted long commands
309 - sorted long commands
311 - additional hints
310 - additional hints
312
311
313 Commands are only included once.
312 Commands are only included once.
314 """
313 """
315 intro = _(
314 intro = _(
316 b"""Edit history between %s and %s
315 b"""Edit history between %s and %s
317
316
318 Commits are listed from least to most recent
317 Commits are listed from least to most recent
319
318
320 You can reorder changesets by reordering the lines
319 You can reorder changesets by reordering the lines
321
320
322 Commands:
321 Commands:
323 """
322 """
324 )
323 )
325 actions = []
324 actions = []
326
325
    def addverb(v):
        a = actiontable[v]
        lines = a.message.split(b"\n")
        if len(a.verbs):
            v = b', '.join(sorted(a.verbs, key=lambda v: len(v)))
        actions.append(b" %s = %s" % (v, lines[0]))
        # continuation lines of a multi-line message, indented under the verb
        actions.extend([b'  %s' % l for l in lines[1:]])
334
333
335 for v in (
334 for v in (
336 sorted(primaryactions)
335 sorted(primaryactions)
337 + sorted(secondaryactions)
336 + sorted(secondaryactions)
338 + sorted(tertiaryactions)
337 + sorted(tertiaryactions)
339 ):
338 ):
340 addverb(v)
339 addverb(v)
341 actions.append(b'')
340 actions.append(b'')
342
341
343 hints = []
342 hints = []
344 if ui.configbool(b'histedit', b'dropmissing'):
343 if ui.configbool(b'histedit', b'dropmissing'):
345 hints.append(
344 hints.append(
346 b"Deleting a changeset from the list "
345 b"Deleting a changeset from the list "
347 b"will DISCARD it from the edited history!"
346 b"will DISCARD it from the edited history!"
348 )
347 )
349
348
350 lines = (intro % (first, last)).split(b'\n') + actions + hints
349 lines = (intro % (first, last)).split(b'\n') + actions + hints
351
350
352 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
351 return b''.join([b'# %s\n' % l if l else b'#\n' for l in lines])
353
352
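# A hedged illustration (not captured hg output; hashes invented) of the
# comment block geteditcomment() renders into the rule-file editor:
#
#   # Edit history between 3a2b1c0d9e8f and 7f6e5d4c3b2a
#   #
#   # Commits are listed from least to most recent
#   #
#   # You can reorder changesets by reordering the lines
#   #
#   # Commands:
#   #
#   #  e, edit = use commit, but allow edits before making new commit
#   #  p, pick = use commit
#   #  ...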
354
353
355 class histeditstate:
354 class histeditstate:
356 def __init__(self, repo):
355 def __init__(self, repo):
357 self.repo = repo
356 self.repo = repo
358 self.actions = None
357 self.actions = None
359 self.keep = None
358 self.keep = None
360 self.topmost = None
359 self.topmost = None
361 self.parentctxnode = None
360 self.parentctxnode = None
362 self.lock = None
361 self.lock = None
363 self.wlock = None
362 self.wlock = None
364 self.backupfile = None
363 self.backupfile = None
365 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
364 self.stateobj = statemod.cmdstate(repo, b'histedit-state')
366 self.replacements = []
365 self.replacements = []
367
366
368 def read(self):
367 def read(self):
369 """Load histedit state from disk and set fields appropriately."""
368 """Load histedit state from disk and set fields appropriately."""
370 if not self.stateobj.exists():
369 if not self.stateobj.exists():
371 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
370 cmdutil.wrongtooltocontinue(self.repo, _(b'histedit'))
372
371
373 data = self._read()
372 data = self._read()
374
373
375 self.parentctxnode = data[b'parentctxnode']
374 self.parentctxnode = data[b'parentctxnode']
376 actions = parserules(data[b'rules'], self)
375 actions = parserules(data[b'rules'], self)
377 self.actions = actions
376 self.actions = actions
378 self.keep = data[b'keep']
377 self.keep = data[b'keep']
379 self.topmost = data[b'topmost']
378 self.topmost = data[b'topmost']
380 self.replacements = data[b'replacements']
379 self.replacements = data[b'replacements']
381 self.backupfile = data[b'backupfile']
380 self.backupfile = data[b'backupfile']
382
381
383 def _read(self):
382 def _read(self):
384 fp = self.repo.vfs.read(b'histedit-state')
383 fp = self.repo.vfs.read(b'histedit-state')
385 if fp.startswith(b'v1\n'):
384 if fp.startswith(b'v1\n'):
386 data = self._load()
385 data = self._load()
387 parentctxnode, rules, keep, topmost, replacements, backupfile = data
386 parentctxnode, rules, keep, topmost, replacements, backupfile = data
388 else:
387 else:
389 data = pickle.loads(fp)
388 data = pickle.loads(fp)
390 parentctxnode, rules, keep, topmost, replacements = data
389 parentctxnode, rules, keep, topmost, replacements = data
391 backupfile = None
390 backupfile = None
392 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
391 rules = b"\n".join([b"%s %s" % (verb, rest) for [verb, rest] in rules])
393
392
394 return {
393 return {
395 b'parentctxnode': parentctxnode,
394 b'parentctxnode': parentctxnode,
396 b"rules": rules,
395 b"rules": rules,
397 b"keep": keep,
396 b"keep": keep,
398 b"topmost": topmost,
397 b"topmost": topmost,
399 b"replacements": replacements,
398 b"replacements": replacements,
400 b"backupfile": backupfile,
399 b"backupfile": backupfile,
401 }
400 }
402
401
403 def write(self, tr=None):
402 def write(self, tr=None):
404 if tr:
403 if tr:
405 tr.addfilegenerator(
404 tr.addfilegenerator(
406 b'histedit-state',
405 b'histedit-state',
407 (b'histedit-state',),
406 (b'histedit-state',),
408 self._write,
407 self._write,
409 location=b'plain',
408 location=b'plain',
410 )
409 )
411 else:
410 else:
412 with self.repo.vfs(b"histedit-state", b"w") as f:
411 with self.repo.vfs(b"histedit-state", b"w") as f:
413 self._write(f)
412 self._write(f)
414
413
415 def _write(self, fp):
414 def _write(self, fp):
416 fp.write(b'v1\n')
415 fp.write(b'v1\n')
417 fp.write(b'%s\n' % hex(self.parentctxnode))
416 fp.write(b'%s\n' % hex(self.parentctxnode))
418 fp.write(b'%s\n' % hex(self.topmost))
417 fp.write(b'%s\n' % hex(self.topmost))
419 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
418 fp.write(b'%s\n' % (b'True' if self.keep else b'False'))
420 fp.write(b'%d\n' % len(self.actions))
419 fp.write(b'%d\n' % len(self.actions))
421 for action in self.actions:
420 for action in self.actions:
422 fp.write(b'%s\n' % action.tostate())
421 fp.write(b'%s\n' % action.tostate())
423 fp.write(b'%d\n' % len(self.replacements))
422 fp.write(b'%d\n' % len(self.replacements))
424 for replacement in self.replacements:
423 for replacement in self.replacements:
425 fp.write(
424 fp.write(
426 b'%s%s\n'
425 b'%s%s\n'
427 % (
426 % (
428 hex(replacement[0]),
427 hex(replacement[0]),
429 b''.join(hex(r) for r in replacement[1]),
428 b''.join(hex(r) for r in replacement[1]),
430 )
429 )
431 )
430 )
432 backupfile = self.backupfile
431 backupfile = self.backupfile
433 if not backupfile:
432 if not backupfile:
434 backupfile = b''
433 backupfile = b''
435 fp.write(b'%s\n' % backupfile)
434 fp.write(b'%s\n' % backupfile)
436
435
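    # Sketch of the v1 on-disk layout produced by _write() and read back by
    # _load() below (hashes abbreviated, counts hypothetical):
    #
    #   v1
    #   <parentctxnode hex>
    #   <topmost hex>
    #   False                         <- keep flag
    #   2                             <- number of actions
    #   pick                          <- verb, from tostate() ...
    #   <node hex>                    <- ... followed by its node
    #   mess
    #   <node hex>
    #   0                             <- number of replacements
    #   <backupfile path, or an empty line>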
437 def _load(self):
436 def _load(self):
438 fp = self.repo.vfs(b'histedit-state', b'r')
437 fp = self.repo.vfs(b'histedit-state', b'r')
439 lines = [l[:-1] for l in fp.readlines()]
438 lines = [l[:-1] for l in fp.readlines()]
440
439
441 index = 0
440 index = 0
442 lines[index] # version number
441 lines[index] # version number
443 index += 1
442 index += 1
444
443
445 parentctxnode = bin(lines[index])
444 parentctxnode = bin(lines[index])
446 index += 1
445 index += 1
447
446
448 topmost = bin(lines[index])
447 topmost = bin(lines[index])
449 index += 1
448 index += 1
450
449
451 keep = lines[index] == b'True'
450 keep = lines[index] == b'True'
452 index += 1
451 index += 1
453
452
454 # Rules
453 # Rules
455 rules = []
454 rules = []
456 rulelen = int(lines[index])
455 rulelen = int(lines[index])
457 index += 1
456 index += 1
458 for i in range(rulelen):
457 for i in range(rulelen):
459 ruleaction = lines[index]
458 ruleaction = lines[index]
460 index += 1
459 index += 1
461 rule = lines[index]
460 rule = lines[index]
462 index += 1
461 index += 1
463 rules.append((ruleaction, rule))
462 rules.append((ruleaction, rule))
464
463
465 # Replacements
464 # Replacements
466 replacements = []
465 replacements = []
467 replacementlen = int(lines[index])
466 replacementlen = int(lines[index])
468 index += 1
467 index += 1
469 for i in range(replacementlen):
468 for i in range(replacementlen):
470 replacement = lines[index]
469 replacement = lines[index]
471 original = bin(replacement[:40])
470 original = bin(replacement[:40])
472 succ = [
471 succ = [
473 bin(replacement[i : i + 40])
472 bin(replacement[i : i + 40])
474 for i in range(40, len(replacement), 40)
473 for i in range(40, len(replacement), 40)
475 ]
474 ]
476 replacements.append((original, succ))
475 replacements.append((original, succ))
477 index += 1
476 index += 1
478
477
479 backupfile = lines[index]
478 backupfile = lines[index]
480 index += 1
479 index += 1
481
480
482 fp.close()
481 fp.close()
483
482
484 return parentctxnode, rules, keep, topmost, replacements, backupfile
483 return parentctxnode, rules, keep, topmost, replacements, backupfile
485
484
486 def clear(self):
485 def clear(self):
487 if self.inprogress():
486 if self.inprogress():
488 self.repo.vfs.unlink(b'histedit-state')
487 self.repo.vfs.unlink(b'histedit-state')
489
488
490 def inprogress(self):
489 def inprogress(self):
491 return self.repo.vfs.exists(b'histedit-state')
490 return self.repo.vfs.exists(b'histedit-state')
492
491
493
492
494 class histeditaction:
493 class histeditaction:
495 def __init__(self, state, node):
494 def __init__(self, state, node):
496 self.state = state
495 self.state = state
497 self.repo = state.repo
496 self.repo = state.repo
498 self.node = node
497 self.node = node
499
498
500 @classmethod
499 @classmethod
501 def fromrule(cls, state, rule):
500 def fromrule(cls, state, rule):
502 """Parses the given rule, returning an instance of the histeditaction."""
501 """Parses the given rule, returning an instance of the histeditaction."""
503 ruleid = rule.strip().split(b' ', 1)[0]
502 ruleid = rule.strip().split(b' ', 1)[0]
        # ruleid can be anything from rev numbers, hashes, "bookmarks" etc
        # validate the rule id and derive the full rulehash
506 try:
505 try:
507 rev = bin(ruleid)
506 rev = bin(ruleid)
508 except binascii.Error:
507 except binascii.Error:
509 try:
508 try:
510 _ctx = scmutil.revsingle(state.repo, ruleid)
509 _ctx = scmutil.revsingle(state.repo, ruleid)
511 rulehash = _ctx.hex()
510 rulehash = _ctx.hex()
512 rev = bin(rulehash)
511 rev = bin(rulehash)
513 except error.RepoLookupError:
512 except error.RepoLookupError:
514 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
513 raise error.ParseError(_(b"invalid changeset %s") % ruleid)
515 return cls(state, rev)
514 return cls(state, rev)
516
515
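    # Hedged example of what fromrule() receives (the caller is expected to
    # have stripped the verb already; hash invented):
    #
    #   histeditaction.fromrule(state, b'0123abcd4567 add a feature')
    #
    # Only the first whitespace-separated token is used as the changeset id;
    # everything after it is ignored here.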
517 def verify(self, prev, expected, seen):
516 def verify(self, prev, expected, seen):
518 """Verifies semantic correctness of the rule"""
517 """Verifies semantic correctness of the rule"""
519 repo = self.repo
518 repo = self.repo
520 ha = hex(self.node)
519 ha = hex(self.node)
521 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
520 self.node = scmutil.resolvehexnodeidprefix(repo, ha)
522 if self.node is None:
521 if self.node is None:
523 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
522 raise error.ParseError(_(b'unknown changeset %s listed') % ha[:12])
524 self._verifynodeconstraints(prev, expected, seen)
523 self._verifynodeconstraints(prev, expected, seen)
525
524
526 def _verifynodeconstraints(self, prev, expected, seen):
525 def _verifynodeconstraints(self, prev, expected, seen):
        # by default a command needs a node from the edited list
528 if self.node not in expected:
527 if self.node not in expected:
529 raise error.ParseError(
528 raise error.ParseError(
530 _(b'%s "%s" changeset was not a candidate')
529 _(b'%s "%s" changeset was not a candidate')
531 % (self.verb, short(self.node)),
530 % (self.verb, short(self.node)),
532 hint=_(b'only use listed changesets'),
531 hint=_(b'only use listed changesets'),
533 )
532 )
534 # and only one command per node
533 # and only one command per node
535 if self.node in seen:
534 if self.node in seen:
536 raise error.ParseError(
535 raise error.ParseError(
537 _(b'duplicated command for changeset %s') % short(self.node)
536 _(b'duplicated command for changeset %s') % short(self.node)
538 )
537 )
539
538
540 def torule(self):
539 def torule(self):
541 """build a histedit rule line for an action
540 """build a histedit rule line for an action
542
541
543 by default lines are in the form:
542 by default lines are in the form:
544 <hash> <rev> <summary>
543 <hash> <rev> <summary>
545 """
544 """
546 ctx = self.repo[self.node]
545 ctx = self.repo[self.node]
547 ui = self.repo.ui
546 ui = self.repo.ui
548 # We don't want color codes in the commit message template, so
547 # We don't want color codes in the commit message template, so
549 # disable the label() template function while we render it.
548 # disable the label() template function while we render it.
550 with ui.configoverride(
549 with ui.configoverride(
551 {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
550 {(b'templatealias', b'label(l,x)'): b"x"}, b'histedit'
552 ):
551 ):
553 summary = cmdutil.rendertemplate(
552 summary = cmdutil.rendertemplate(
554 ctx, ui.config(b'histedit', b'summary-template')
553 ctx, ui.config(b'histedit', b'summary-template')
555 )
554 )
556 line = b'%s %s %s' % (self.verb, ctx, stringutil.firstline(summary))
555 line = b'%s %s %s' % (self.verb, ctx, stringutil.firstline(summary))
557 # trim to 75 columns by default so it's not stupidly wide in my editor
556 # trim to 75 columns by default so it's not stupidly wide in my editor
558 # (the 5 more are left for verb)
557 # (the 5 more are left for verb)
559 maxlen = self.repo.ui.configint(b'histedit', b'linelen')
558 maxlen = self.repo.ui.configint(b'histedit', b'linelen')
560 maxlen = max(maxlen, 22) # avoid truncating hash
559 maxlen = max(maxlen, 22) # avoid truncating hash
561 return stringutil.ellipsis(line, maxlen)
560 return stringutil.ellipsis(line, maxlen)
562
561
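    # Illustrative rendering from torule() with the default summary-template
    # (hash and summary hypothetical):
    #
    #   pick 0123abcd4567 14 add a feature
    #
    # clipped to histedit.linelen but never below 22 columns, so the verb
    # and the 12-character hash always survive truncation.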
563 def tostate(self):
562 def tostate(self):
564 """Print an action in format used by histedit state files
563 """Print an action in format used by histedit state files
565 (the first line is a verb, the remainder is the second)
564 (the first line is a verb, the remainder is the second)
566 """
565 """
567 return b"%s\n%s" % (self.verb, hex(self.node))
566 return b"%s\n%s" % (self.verb, hex(self.node))
568
567
569 def run(self):
568 def run(self):
570 """Runs the action. The default behavior is simply apply the action's
569 """Runs the action. The default behavior is simply apply the action's
571 rulectx onto the current parentctx."""
570 rulectx onto the current parentctx."""
572 self.applychange()
571 self.applychange()
573 self.continuedirty()
572 self.continuedirty()
574 return self.continueclean()
573 return self.continueclean()
575
574
576 def applychange(self):
575 def applychange(self):
577 """Applies the changes from this action's rulectx onto the current
576 """Applies the changes from this action's rulectx onto the current
578 parentctx, but does not commit them."""
577 parentctx, but does not commit them."""
579 repo = self.repo
578 repo = self.repo
580 rulectx = repo[self.node]
579 rulectx = repo[self.node]
581 with repo.ui.silent():
580 with repo.ui.silent():
582 hg.update(repo, self.state.parentctxnode, quietempty=True)
581 hg.update(repo, self.state.parentctxnode, quietempty=True)
583 stats = applychanges(repo.ui, repo, rulectx, {})
582 stats = applychanges(repo.ui, repo, rulectx, {})
584 repo.dirstate.setbranch(rulectx.branch(), repo.currenttransaction())
583 repo.dirstate.setbranch(rulectx.branch(), repo.currenttransaction())
585 if stats.unresolvedcount:
584 if stats.unresolvedcount:
586 raise error.InterventionRequired(
585 raise error.InterventionRequired(
587 _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
586 _(b'Fix up the change (%s %s)') % (self.verb, short(self.node)),
588 hint=_(b'hg histedit --continue to resume'),
587 hint=_(b'hg histedit --continue to resume'),
589 )
588 )
590
589
591 def continuedirty(self):
590 def continuedirty(self):
592 """Continues the action when changes have been applied to the working
591 """Continues the action when changes have been applied to the working
593 copy. The default behavior is to commit the dirty changes."""
592 copy. The default behavior is to commit the dirty changes."""
594 repo = self.repo
593 repo = self.repo
595 rulectx = repo[self.node]
594 rulectx = repo[self.node]
596
595
597 editor = self.commiteditor()
596 editor = self.commiteditor()
598 commit = commitfuncfor(repo, rulectx)
597 commit = commitfuncfor(repo, rulectx)
599 if repo.ui.configbool(b'rewrite', b'update-timestamp'):
598 if repo.ui.configbool(b'rewrite', b'update-timestamp'):
600 date = dateutil.makedate()
599 date = dateutil.makedate()
601 else:
600 else:
602 date = rulectx.date()
601 date = rulectx.date()
603 commit(
602 commit(
604 text=rulectx.description(),
603 text=rulectx.description(),
605 user=rulectx.user(),
604 user=rulectx.user(),
606 date=date,
605 date=date,
607 extra=rulectx.extra(),
606 extra=rulectx.extra(),
608 editor=editor,
607 editor=editor,
609 )
608 )
610
609
611 def commiteditor(self):
610 def commiteditor(self):
612 """The editor to be used to edit the commit message."""
611 """The editor to be used to edit the commit message."""
613 return False
612 return False
614
613
615 def continueclean(self):
614 def continueclean(self):
616 """Continues the action when the working copy is clean. The default
615 """Continues the action when the working copy is clean. The default
617 behavior is to accept the current commit as the new version of the
616 behavior is to accept the current commit as the new version of the
618 rulectx."""
617 rulectx."""
619 ctx = self.repo[b'.']
618 ctx = self.repo[b'.']
620 if ctx.node() == self.state.parentctxnode:
619 if ctx.node() == self.state.parentctxnode:
621 self.repo.ui.warn(
620 self.repo.ui.warn(
622 _(b'%s: skipping changeset (no changes)\n') % short(self.node)
621 _(b'%s: skipping changeset (no changes)\n') % short(self.node)
623 )
622 )
624 return ctx, [(self.node, tuple())]
623 return ctx, [(self.node, tuple())]
625 if ctx.node() == self.node:
624 if ctx.node() == self.node:
626 # Nothing changed
625 # Nothing changed
627 return ctx, []
626 return ctx, []
628 return ctx, [(self.node, (ctx.node(),))]
627 return ctx, [(self.node, (ctx.node(),))]
629
628
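    # Convention for the replacement tuples returned above: (oldnode, ())
    # records a changeset dropped with no successor, while
    # (oldnode, (newnode,)) records a rewrite; histedit aggregates these
    # pairs to clean up superseded nodes once the whole edit finishes.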
630
629
631 def commitfuncfor(repo, src):
630 def commitfuncfor(repo, src):
632 """Build a commit function for the replacement of <src>
631 """Build a commit function for the replacement of <src>
633
632
    This function ensures we apply the same treatment to all changesets.
635
634
636 - Add a 'histedit_source' entry in extra.
635 - Add a 'histedit_source' entry in extra.
637
636
    Note that fold has its own separate logic because its handling is a bit
    different and not easily factored out of the fold method.
640 """
639 """
641 phasemin = src.phase()
640 phasemin = src.phase()
642
641
643 def commitfunc(**kwargs):
642 def commitfunc(**kwargs):
644 overrides = {(b'phases', b'new-commit'): phasemin}
643 overrides = {(b'phases', b'new-commit'): phasemin}
645 with repo.ui.configoverride(overrides, b'histedit'):
644 with repo.ui.configoverride(overrides, b'histedit'):
646 extra = kwargs.get('extra', {}).copy()
645 extra = kwargs.get('extra', {}).copy()
647 extra[b'histedit_source'] = src.hex()
646 extra[b'histedit_source'] = src.hex()
648 kwargs['extra'] = extra
647 kwargs['extra'] = extra
649 return repo.commit(**kwargs)
648 return repo.commit(**kwargs)
650
649
651 return commitfunc
650 return commitfunc
652
651
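# Hedged usage sketch for commitfuncfor(): each action does roughly
#
#   commit = commitfuncfor(repo, rulectx)
#   commit(text=..., user=..., date=..., extra=..., editor=...)
#
# so every rewritten commit is created with phases.new-commit pinned to the
# source changeset's phase and a 'histedit_source' extra recording its hash.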
653
652
654 def applychanges(ui, repo, ctx, opts):
653 def applychanges(ui, repo, ctx, opts):
655 """Merge changeset from ctx (only) in the current working directory"""
654 """Merge changeset from ctx (only) in the current working directory"""
656 if ctx.p1().node() == repo.dirstate.p1():
655 if ctx.p1().node() == repo.dirstate.p1():
        # edits are "in place": no merge is needed, we just apply the
        # changes onto the parent for editing
659 with ui.silent():
658 with ui.silent():
660 cmdutil.revert(ui, repo, ctx, all=True)
659 cmdutil.revert(ui, repo, ctx, all=True)
661 stats = mergemod.updateresult(0, 0, 0, 0)
660 stats = mergemod.updateresult(0, 0, 0, 0)
662 else:
661 else:
663 try:
662 try:
664 # ui.forcemerge is an internal variable, do not document
663 # ui.forcemerge is an internal variable, do not document
665 repo.ui.setconfig(
664 repo.ui.setconfig(
666 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
665 b'ui', b'forcemerge', opts.get(b'tool', b''), b'histedit'
667 )
666 )
668 stats = mergemod.graft(
667 stats = mergemod.graft(
669 repo,
668 repo,
670 ctx,
669 ctx,
671 labels=[
670 labels=[
672 b'already edited',
671 b'already edited',
673 b'current change',
672 b'current change',
674 b'parent of current change',
673 b'parent of current change',
675 ],
674 ],
676 )
675 )
677 finally:
676 finally:
678 repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
677 repo.ui.setconfig(b'ui', b'forcemerge', b'', b'histedit')
679 return stats
678 return stats
680
679
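# Reading the two branches in applychanges(): when the rule's parent already
# matches the working directory parent, a silent in-place revert to the
# rule's content suffices and no merge machinery runs; otherwise
# mergemod.graft() replays that single changeset, honoring any --tool the
# user passed via the internal ui.forcemerge setting.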
681
680
682 def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
681 def collapse(repo, firstctx, lastctx, commitopts, skipprompt=False):
683 """collapse the set of revisions from first to last as new one.
682 """collapse the set of revisions from first to last as new one.
684
683
685 Expected commit options are:
684 Expected commit options are:
686 - message
685 - message
687 - date
686 - date
688 - username
687 - username
689 Commit message is edited in all cases.
688 Commit message is edited in all cases.
690
689
691 This function works in memory."""
690 This function works in memory."""
692 ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
691 ctxs = list(repo.set(b'%d::%d', firstctx.rev(), lastctx.rev()))
693 if not ctxs:
692 if not ctxs:
694 return None
693 return None
695 for c in ctxs:
694 for c in ctxs:
696 if not c.mutable():
695 if not c.mutable():
697 raise error.ParseError(
696 raise error.ParseError(
698 _(b"cannot fold into public change %s") % short(c.node())
697 _(b"cannot fold into public change %s") % short(c.node())
699 )
698 )
700 base = firstctx.p1()
699 base = firstctx.p1()
701
700
702 # commit a new version of the old changeset, including the update
701 # commit a new version of the old changeset, including the update
703 # collect all files which might be affected
702 # collect all files which might be affected
704 files = set()
703 files = set()
705 for ctx in ctxs:
704 for ctx in ctxs:
706 files.update(ctx.files())
705 files.update(ctx.files())
707
706
708 # Recompute copies (avoid recording a -> b -> a)
707 # Recompute copies (avoid recording a -> b -> a)
709 copied = copies.pathcopies(base, lastctx)
708 copied = copies.pathcopies(base, lastctx)
710
709
711 # prune files which were reverted by the updates
710 # prune files which were reverted by the updates
712 files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
711 files = [f for f in files if not cmdutil.samefile(f, lastctx, base)]
713 # commit version of these files as defined by head
712 # commit version of these files as defined by head
714 headmf = lastctx.manifest()
713 headmf = lastctx.manifest()
715
714
716 def filectxfn(repo, ctx, path):
715 def filectxfn(repo, ctx, path):
717 if path in headmf:
716 if path in headmf:
718 fctx = lastctx[path]
717 fctx = lastctx[path]
719 flags = fctx.flags()
718 flags = fctx.flags()
720 mctx = context.memfilectx(
719 mctx = context.memfilectx(
721 repo,
720 repo,
722 ctx,
721 ctx,
723 fctx.path(),
722 fctx.path(),
724 fctx.data(),
723 fctx.data(),
725 islink=b'l' in flags,
724 islink=b'l' in flags,
726 isexec=b'x' in flags,
725 isexec=b'x' in flags,
727 copysource=copied.get(path),
726 copysource=copied.get(path),
728 )
727 )
729 return mctx
728 return mctx
730 return None
729 return None
731
730
732 if commitopts.get(b'message'):
731 if commitopts.get(b'message'):
733 message = commitopts[b'message']
732 message = commitopts[b'message']
734 else:
733 else:
735 message = firstctx.description()
734 message = firstctx.description()
736 user = commitopts.get(b'user')
735 user = commitopts.get(b'user')
737 date = commitopts.get(b'date')
736 date = commitopts.get(b'date')
738 extra = commitopts.get(b'extra')
737 extra = commitopts.get(b'extra')
739
738
740 parents = (firstctx.p1().node(), firstctx.p2().node())
739 parents = (firstctx.p1().node(), firstctx.p2().node())
741 editor = None
740 editor = None
742 if not skipprompt:
741 if not skipprompt:
743 editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
742 editor = cmdutil.getcommiteditor(edit=True, editform=b'histedit.fold')
744 new = context.memctx(
743 new = context.memctx(
745 repo,
744 repo,
746 parents=parents,
745 parents=parents,
747 text=message,
746 text=message,
748 files=files,
747 files=files,
749 filectxfn=filectxfn,
748 filectxfn=filectxfn,
750 user=user,
749 user=user,
751 date=date,
750 date=date,
752 extra=extra,
751 extra=extra,
753 editor=editor,
752 editor=editor,
754 )
753 )
755 return repo.commitctx(new)
754 return repo.commitctx(new)
756
755
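# A minimal sketch (values hypothetical) of the commitopts mapping that
# collapse() consumes; finishfold() below assembles exactly these keys:
#
#   commitopts = {
#       b'message': b'combined description',
#       b'user': b'Jane Doe <jane@example.com>',
#       b'date': (1700000000, 0),
#       b'extra': {b'histedit_source': b'<hex>,<hex>'},
#   }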
757
756
758 def _isdirtywc(repo):
757 def _isdirtywc(repo):
759 return repo[None].dirty(missing=True)
758 return repo[None].dirty(missing=True)
760
759
761
760
762 def abortdirty():
761 def abortdirty():
763 raise error.StateError(
762 raise error.StateError(
764 _(b'working copy has pending changes'),
763 _(b'working copy has pending changes'),
765 hint=_(
764 hint=_(
766 b'amend, commit, or revert them and run histedit '
765 b'amend, commit, or revert them and run histedit '
767 b'--continue, or abort with histedit --abort'
766 b'--continue, or abort with histedit --abort'
768 ),
767 ),
769 )
768 )
770
769
771
770
772 def action(verbs, message, priority=False, internal=False):
771 def action(verbs, message, priority=False, internal=False):
773 def wrap(cls):
772 def wrap(cls):
774 assert not priority or not internal
773 assert not priority or not internal
775 verb = verbs[0]
774 verb = verbs[0]
776 if priority:
775 if priority:
777 primaryactions.add(verb)
776 primaryactions.add(verb)
778 elif internal:
777 elif internal:
779 internalactions.add(verb)
778 internalactions.add(verb)
780 elif len(verbs) > 1:
779 elif len(verbs) > 1:
781 secondaryactions.add(verb)
780 secondaryactions.add(verb)
782 else:
781 else:
783 tertiaryactions.add(verb)
782 tertiaryactions.add(verb)
784
783
785 cls.verb = verb
784 cls.verb = verb
786 cls.verbs = verbs
785 cls.verbs = verbs
787 cls.message = message
786 cls.message = message
788 for verb in verbs:
787 for verb in verbs:
789 actiontable[verb] = cls
788 actiontable[verb] = cls
790 return cls
789 return cls
791
790
792 return wrap
791 return wrap
793
792
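# Hedged sketch of registering a new verb with the decorator above (the
# class name and verbs here are hypothetical):
#
#   @action([b'frob', b'x'], _(b'frobnicate the commit'))
#   class frob(histeditaction):
#       def run(self):
#           ...
#
# With two verbs and no flags this lands in secondaryactions; the class is
# entered into actiontable under both b'frob' and b'x'.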
794
793
795 @action([b'pick', b'p'], _(b'use commit'), priority=True)
794 @action([b'pick', b'p'], _(b'use commit'), priority=True)
796 class pick(histeditaction):
795 class pick(histeditaction):
797 def run(self):
796 def run(self):
798 rulectx = self.repo[self.node]
797 rulectx = self.repo[self.node]
799 if rulectx.p1().node() == self.state.parentctxnode:
798 if rulectx.p1().node() == self.state.parentctxnode:
800 self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
799 self.repo.ui.debug(b'node %s unchanged\n' % short(self.node))
801 return rulectx, []
800 return rulectx, []
802
801
803 return super(pick, self).run()
802 return super(pick, self).run()
804
803
805
804
806 @action(
805 @action(
807 [b'edit', b'e'],
806 [b'edit', b'e'],
808 _(b'use commit, but allow edits before making new commit'),
807 _(b'use commit, but allow edits before making new commit'),
809 priority=True,
808 priority=True,
810 )
809 )
811 class edit(histeditaction):
810 class edit(histeditaction):
812 def run(self):
811 def run(self):
813 repo = self.repo
812 repo = self.repo
814 rulectx = repo[self.node]
813 rulectx = repo[self.node]
815 hg.update(repo, self.state.parentctxnode, quietempty=True)
814 hg.update(repo, self.state.parentctxnode, quietempty=True)
816 applychanges(repo.ui, repo, rulectx, {})
815 applychanges(repo.ui, repo, rulectx, {})
817 hint = _(b'to edit %s, `hg histedit --continue` after making changes')
816 hint = _(b'to edit %s, `hg histedit --continue` after making changes')
818 raise error.InterventionRequired(
817 raise error.InterventionRequired(
819 _(b'Editing (%s), commit as needed now to split the change')
818 _(b'Editing (%s), commit as needed now to split the change')
820 % short(self.node),
819 % short(self.node),
821 hint=hint % short(self.node),
820 hint=hint % short(self.node),
822 )
821 )
823
822
824 def commiteditor(self):
823 def commiteditor(self):
825 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
824 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.edit')
826
825
827
826
828 @action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
827 @action([b'fold', b'f'], _(b'use commit, but combine it with the one above'))
829 class fold(histeditaction):
828 class fold(histeditaction):
830 def verify(self, prev, expected, seen):
829 def verify(self, prev, expected, seen):
831 """Verifies semantic correctness of the fold rule"""
830 """Verifies semantic correctness of the fold rule"""
832 super(fold, self).verify(prev, expected, seen)
831 super(fold, self).verify(prev, expected, seen)
833 repo = self.repo
832 repo = self.repo
        if not prev:
            c = repo[self.node].p1()
        elif prev.verb not in (b'pick', b'base'):
            return
        else:
            c = repo[prev.node]
840 if not c.mutable():
839 if not c.mutable():
841 raise error.ParseError(
840 raise error.ParseError(
842 _(b"cannot fold into public change %s") % short(c.node())
841 _(b"cannot fold into public change %s") % short(c.node())
843 )
842 )
844
843
845 def continuedirty(self):
844 def continuedirty(self):
846 repo = self.repo
845 repo = self.repo
847 rulectx = repo[self.node]
846 rulectx = repo[self.node]
848
847
849 commit = commitfuncfor(repo, rulectx)
848 commit = commitfuncfor(repo, rulectx)
850 commit(
849 commit(
851 text=b'fold-temp-revision %s' % short(self.node),
850 text=b'fold-temp-revision %s' % short(self.node),
852 user=rulectx.user(),
851 user=rulectx.user(),
853 date=rulectx.date(),
852 date=rulectx.date(),
854 extra=rulectx.extra(),
853 extra=rulectx.extra(),
855 )
854 )
856
855
857 def continueclean(self):
856 def continueclean(self):
858 repo = self.repo
857 repo = self.repo
859 ctx = repo[b'.']
858 ctx = repo[b'.']
860 rulectx = repo[self.node]
859 rulectx = repo[self.node]
861 parentctxnode = self.state.parentctxnode
860 parentctxnode = self.state.parentctxnode
862 if ctx.node() == parentctxnode:
861 if ctx.node() == parentctxnode:
863 repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
862 repo.ui.warn(_(b'%s: empty changeset\n') % short(self.node))
864 return ctx, [(self.node, (parentctxnode,))]
863 return ctx, [(self.node, (parentctxnode,))]
865
864
866 parentctx = repo[parentctxnode]
865 parentctx = repo[parentctxnode]
867 newcommits = {
866 newcommits = {
868 c.node()
867 c.node()
869 for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
868 for c in repo.set(b'(%d::. - %d)', parentctx.rev(), parentctx.rev())
870 }
869 }
871 if not newcommits:
870 if not newcommits:
872 repo.ui.warn(
871 repo.ui.warn(
873 _(
872 _(
874 b'%s: cannot fold - working copy is not a '
873 b'%s: cannot fold - working copy is not a '
875 b'descendant of previous commit %s\n'
874 b'descendant of previous commit %s\n'
876 )
875 )
877 % (short(self.node), short(parentctxnode))
876 % (short(self.node), short(parentctxnode))
878 )
877 )
879 return ctx, [(self.node, (ctx.node(),))]
878 return ctx, [(self.node, (ctx.node(),))]
880
879
881 middlecommits = newcommits.copy()
880 middlecommits = newcommits.copy()
882 middlecommits.discard(ctx.node())
881 middlecommits.discard(ctx.node())
883
882
884 return self.finishfold(
883 return self.finishfold(
885 repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
884 repo.ui, repo, parentctx, rulectx, ctx.node(), middlecommits
886 )
885 )
887
886
888 def skipprompt(self):
887 def skipprompt(self):
889 """Returns true if the rule should skip the message editor.
888 """Returns true if the rule should skip the message editor.
890
889
891 For example, 'fold' wants to show an editor, but 'rollup'
890 For example, 'fold' wants to show an editor, but 'rollup'
892 doesn't want to.
891 doesn't want to.
893 """
892 """
894 return False
893 return False
895
894
896 def mergedescs(self):
895 def mergedescs(self):
897 """Returns true if the rule should merge messages of multiple changes.
896 """Returns true if the rule should merge messages of multiple changes.
898
897
899 This exists mainly so that 'rollup' rules can be a subclass of
898 This exists mainly so that 'rollup' rules can be a subclass of
900 'fold'.
899 'fold'.
901 """
900 """
902 return True
901 return True
903
902
904 def firstdate(self):
903 def firstdate(self):
905 """Returns true if the rule should preserve the date of the first
904 """Returns true if the rule should preserve the date of the first
906 change.
905 change.
907
906
908 This exists mainly so that 'rollup' rules can be a subclass of
907 This exists mainly so that 'rollup' rules can be a subclass of
909 'fold'.
908 'fold'.
910 """
909 """
911 return False
910 return False
912
911
913 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
912 def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
914 mergemod.update(ctx.p1())
913 mergemod.update(ctx.p1())
915 ### prepare new commit data
914 ### prepare new commit data
916 commitopts = {}
915 commitopts = {}
917 commitopts[b'user'] = ctx.user()
916 commitopts[b'user'] = ctx.user()
918 # commit message
917 # commit message
919 if not self.mergedescs():
918 if not self.mergedescs():
920 newmessage = ctx.description()
919 newmessage = ctx.description()
921 else:
920 else:
922 newmessage = (
921 newmessage = (
923 b'\n***\n'.join(
922 b'\n***\n'.join(
924 [ctx.description()]
923 [ctx.description()]
925 + [repo[r].description() for r in internalchanges]
924 + [repo[r].description() for r in internalchanges]
926 + [oldctx.description()]
925 + [oldctx.description()]
927 )
926 )
928 + b'\n'
927 + b'\n'
929 )
928 )
930 commitopts[b'message'] = newmessage
929 commitopts[b'message'] = newmessage
931 # date
930 # date
932 if self.firstdate():
931 if self.firstdate():
933 commitopts[b'date'] = ctx.date()
932 commitopts[b'date'] = ctx.date()
934 else:
933 else:
935 commitopts[b'date'] = max(ctx.date(), oldctx.date())
934 commitopts[b'date'] = max(ctx.date(), oldctx.date())
936 # if date is to be updated to current
935 # if date is to be updated to current
937 if ui.configbool(b'rewrite', b'update-timestamp'):
936 if ui.configbool(b'rewrite', b'update-timestamp'):
938 commitopts[b'date'] = dateutil.makedate()
937 commitopts[b'date'] = dateutil.makedate()
939
938
940 extra = ctx.extra().copy()
939 extra = ctx.extra().copy()
941 # histedit_source
940 # histedit_source
        # note: ctx is likely a temporary commit but that's the best we can
        # do here. This is sufficient to solve issue3681 anyway.
944 extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
943 extra[b'histedit_source'] = b'%s,%s' % (ctx.hex(), oldctx.hex())
945 commitopts[b'extra'] = extra
944 commitopts[b'extra'] = extra
946 phasemin = max(ctx.phase(), oldctx.phase())
945 phasemin = max(ctx.phase(), oldctx.phase())
947 overrides = {(b'phases', b'new-commit'): phasemin}
946 overrides = {(b'phases', b'new-commit'): phasemin}
948 with repo.ui.configoverride(overrides, b'histedit'):
947 with repo.ui.configoverride(overrides, b'histedit'):
949 n = collapse(
948 n = collapse(
950 repo,
949 repo,
951 ctx,
950 ctx,
952 repo[newnode],
951 repo[newnode],
953 commitopts,
952 commitopts,
954 skipprompt=self.skipprompt(),
953 skipprompt=self.skipprompt(),
955 )
954 )
956 if n is None:
955 if n is None:
957 return ctx, []
956 return ctx, []
958 mergemod.update(repo[n])
957 mergemod.update(repo[n])
959 replacements = [
958 replacements = [
960 (oldctx.node(), (newnode,)),
959 (oldctx.node(), (newnode,)),
961 (ctx.node(), (n,)),
960 (ctx.node(), (n,)),
962 (newnode, (n,)),
961 (newnode, (n,)),
963 ]
962 ]
964 for ich in internalchanges:
963 for ich in internalchanges:
965 replacements.append((ich, (n,)))
964 replacements.append((ich, (n,)))
966 return repo[n], replacements
965 return repo[n], replacements
967
966
968
967
969 @action(
968 @action(
970 [b'base', b'b'],
969 [b'base', b'b'],
971 _(b'checkout changeset and apply further changesets from there'),
970 _(b'checkout changeset and apply further changesets from there'),
972 )
971 )
973 class base(histeditaction):
972 class base(histeditaction):
974 def run(self):
973 def run(self):
975 if self.repo[b'.'].node() != self.node:
974 if self.repo[b'.'].node() != self.node:
976 mergemod.clean_update(self.repo[self.node])
975 mergemod.clean_update(self.repo[self.node])
977 return self.continueclean()
976 return self.continueclean()
978
977
979 def continuedirty(self):
978 def continuedirty(self):
980 abortdirty()
979 abortdirty()
981
980
982 def continueclean(self):
981 def continueclean(self):
983 basectx = self.repo[b'.']
982 basectx = self.repo[b'.']
984 return basectx, []
983 return basectx, []
985
984
986 def _verifynodeconstraints(self, prev, expected, seen):
985 def _verifynodeconstraints(self, prev, expected, seen):
        # base can only be used with a node not in the edited set
988 if self.node in expected:
987 if self.node in expected:
989 msg = _(b'%s "%s" changeset was an edited list candidate')
988 msg = _(b'%s "%s" changeset was an edited list candidate')
990 raise error.ParseError(
989 raise error.ParseError(
991 msg % (self.verb, short(self.node)),
990 msg % (self.verb, short(self.node)),
992 hint=_(b'base must only use unlisted changesets'),
991 hint=_(b'base must only use unlisted changesets'),
993 )
992 )
994
993
995
994
996 @action(
995 @action(
997 [b'_multifold'],
996 [b'_multifold'],
998 _(
997 _(
999 b"""fold subclass used for when multiple folds happen in a row
998 b"""fold subclass used for when multiple folds happen in a row
1000
999
1001 We only want to fire the editor for the folded message once when
1000 We only want to fire the editor for the folded message once when
1002 (say) four changes are folded down into a single change. This is
1001 (say) four changes are folded down into a single change. This is
1003 similar to rollup, but we should preserve both messages so that
1002 similar to rollup, but we should preserve both messages so that
1004 when the last fold operation runs we can show the user all the
1003 when the last fold operation runs we can show the user all the
1005 commit messages in their editor.
1004 commit messages in their editor.
1006 """
1005 """
1007 ),
1006 ),
1008 internal=True,
1007 internal=True,
1009 )
1008 )
1010 class _multifold(fold):
1009 class _multifold(fold):
1011 def skipprompt(self):
1010 def skipprompt(self):
1012 return True
1011 return True
1013
1012
1014
1013
1015 @action(
1014 @action(
1016 [b"roll", b"r"],
1015 [b"roll", b"r"],
1017 _(b"like fold, but discard this commit's description and date"),
1016 _(b"like fold, but discard this commit's description and date"),
1018 )
1017 )
1019 class rollup(fold):
1018 class rollup(fold):
1020 def mergedescs(self):
1019 def mergedescs(self):
1021 return False
1020 return False
1022
1021
1023 def skipprompt(self):
1022 def skipprompt(self):
1024 return True
1023 return True
1025
1024
1026 def firstdate(self):
1025 def firstdate(self):
1027 return True
1026 return True
1028
1027
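# The fold variants above differ only in three predicates; in summary:
#
#   class        skipprompt  mergedescs  firstdate
#   fold         False       True        False
#   _multifold   True        True        False
#   rollup       True        False       True
#
# so 'roll' never opens an editor, keeps only the description of the commit
# it folds into, and preserves the first commit's date (unless
# rewrite.update-timestamp overrides it).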
1029
1028
1030 @action([b"drop", b"d"], _(b'remove commit from history'))
1029 @action([b"drop", b"d"], _(b'remove commit from history'))
1031 class drop(histeditaction):
1030 class drop(histeditaction):
1032 def run(self):
1031 def run(self):
1033 parentctx = self.repo[self.state.parentctxnode]
1032 parentctx = self.repo[self.state.parentctxnode]
1034 return parentctx, [(self.node, tuple())]
1033 return parentctx, [(self.node, tuple())]
1035
1034
1036
1035
1037 @action(
1036 @action(
1038 [b"mess", b"m"],
1037 [b"mess", b"m"],
1039 _(b'edit commit message without changing commit content'),
1038 _(b'edit commit message without changing commit content'),
1040 priority=True,
1039 priority=True,
1041 )
1040 )
1042 class message(histeditaction):
1041 class message(histeditaction):
1043 def commiteditor(self):
1042 def commiteditor(self):
1044 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
1043 return cmdutil.getcommiteditor(edit=True, editform=b'histedit.mess')
1045
1044
1046
1045
1047 def findoutgoing(ui, repo, remote=None, force=False, opts=None):
1046 def findoutgoing(ui, repo, remote=None, force=False, opts=None):
1048 """utility function to find the first outgoing changeset
1047 """utility function to find the first outgoing changeset
1049
1048
1050 Used by initialization code"""
1049 Used by initialization code"""
1051 if opts is None:
1050 if opts is None:
1052 opts = {}
1051 opts = {}
1053 path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
1052 path = urlutil.get_unique_push_path(b'histedit', repo, ui, remote)
1054
1053
1055 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1054 ui.status(_(b'comparing with %s\n') % urlutil.hidepassword(path.loc))
1056
1055
1057 revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
1056 revs, checkout = hg.addbranchrevs(repo, repo, (path.branch, []), None)
1058 other = hg.peer(repo, opts, path)
1057 other = hg.peer(repo, opts, path)
1059
1058
1060 if revs:
1059 if revs:
1061 revs = [repo.lookup(rev) for rev in revs]
1060 revs = [repo.lookup(rev) for rev in revs]
1062
1061
1063 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1062 outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
1064 if not outgoing.missing:
1063 if not outgoing.missing:
1065 raise error.StateError(_(b'no outgoing ancestors'))
1064 raise error.StateError(_(b'no outgoing ancestors'))
1066 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1065 roots = list(repo.revs(b"roots(%ln)", outgoing.missing))
1067 if len(roots) > 1:
1066 if len(roots) > 1:
1068 msg = _(b'there are ambiguous outgoing revisions')
1067 msg = _(b'there are ambiguous outgoing revisions')
1069 hint = _(b"see 'hg help histedit' for more detail")
1068 hint = _(b"see 'hg help histedit' for more detail")
1070 raise error.StateError(msg, hint=hint)
1069 raise error.StateError(msg, hint=hint)
1071 return repo[roots[0]].node()
1070 return repo[roots[0]].node()
1072
1071
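# Why findoutgoing() insists on a single root: histedit rewrites one linear
# stack of changesets, so with several outgoing roots there is no
# unambiguous base to edit from and the user must name a revision explicitly
# instead of using --outgoing.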
1073
1072
1074 # Curses Support
1073 # Curses Support
1075 try:
1074 try:
1076 import curses
1075 import curses
1077 except ImportError:
1076 except ImportError:
1078 curses = None
1077 curses = None
1079
1078
1080 KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
1079 KEY_LIST = [b'pick', b'edit', b'fold', b'drop', b'mess', b'roll']
1081 ACTION_LABELS = {
1080 ACTION_LABELS = {
1082 b'fold': b'^fold',
1081 b'fold': b'^fold',
1083 b'roll': b'^roll',
1082 b'roll': b'^roll',
1084 }
1083 }
1085
1084
1086 COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
1085 COLOR_HELP, COLOR_SELECTED, COLOR_OK, COLOR_WARN, COLOR_CURRENT = 1, 2, 3, 4, 5
1087 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
1086 COLOR_DIFF_ADD_LINE, COLOR_DIFF_DEL_LINE, COLOR_DIFF_OFFSET = 6, 7, 8
1088 COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11
1087 COLOR_ROLL, COLOR_ROLL_CURRENT, COLOR_ROLL_SELECTED = 9, 10, 11
1089
1088
1090 E_QUIT, E_HISTEDIT = 1, 2
1089 E_QUIT, E_HISTEDIT = 1, 2
1091 E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
1090 E_PAGEDOWN, E_PAGEUP, E_LINEUP, E_LINEDOWN, E_RESIZE = 3, 4, 5, 6, 7
1092 MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
1091 MODE_INIT, MODE_PATCH, MODE_RULES, MODE_HELP = 0, 1, 2, 3
1093
1092
1094 KEYTABLE = {
1093 KEYTABLE = {
1095 b'global': {
1094 b'global': {
1096 b'h': b'next-action',
1095 b'h': b'next-action',
1097 b'KEY_RIGHT': b'next-action',
1096 b'KEY_RIGHT': b'next-action',
1098 b'l': b'prev-action',
1097 b'l': b'prev-action',
1099 b'KEY_LEFT': b'prev-action',
1098 b'KEY_LEFT': b'prev-action',
1100 b'q': b'quit',
1099 b'q': b'quit',
1101 b'c': b'histedit',
1100 b'c': b'histedit',
1102 b'C': b'histedit',
1101 b'C': b'histedit',
1103 b'v': b'showpatch',
1102 b'v': b'showpatch',
1104 b'?': b'help',
1103 b'?': b'help',
1105 },
1104 },
1106 MODE_RULES: {
1105 MODE_RULES: {
1107 b'd': b'action-drop',
1106 b'd': b'action-drop',
1108 b'e': b'action-edit',
1107 b'e': b'action-edit',
1109 b'f': b'action-fold',
1108 b'f': b'action-fold',
1110 b'm': b'action-mess',
1109 b'm': b'action-mess',
1111 b'p': b'action-pick',
1110 b'p': b'action-pick',
1112 b'r': b'action-roll',
1111 b'r': b'action-roll',
1113 b' ': b'select',
1112 b' ': b'select',
1114 b'j': b'down',
1113 b'j': b'down',
1115 b'k': b'up',
1114 b'k': b'up',
1116 b'KEY_DOWN': b'down',
1115 b'KEY_DOWN': b'down',
1117 b'KEY_UP': b'up',
1116 b'KEY_UP': b'up',
1118 b'J': b'move-down',
1117 b'J': b'move-down',
1119 b'K': b'move-up',
1118 b'K': b'move-up',
1120 b'KEY_NPAGE': b'move-down',
1119 b'KEY_NPAGE': b'move-down',
1121 b'KEY_PPAGE': b'move-up',
1120 b'KEY_PPAGE': b'move-up',
1122 b'0': b'goto', # Used for 0..9
1121 b'0': b'goto', # Used for 0..9
1123 },
1122 },
1124 MODE_PATCH: {
1123 MODE_PATCH: {
1125 b' ': b'page-down',
1124 b' ': b'page-down',
1126 b'KEY_NPAGE': b'page-down',
1125 b'KEY_NPAGE': b'page-down',
1127 b'KEY_PPAGE': b'page-up',
1126 b'KEY_PPAGE': b'page-up',
1128 b'j': b'line-down',
1127 b'j': b'line-down',
1129 b'k': b'line-up',
1128 b'k': b'line-up',
1130 b'KEY_DOWN': b'line-down',
1129 b'KEY_DOWN': b'line-down',
1131 b'KEY_UP': b'line-up',
1130 b'KEY_UP': b'line-up',
1132 b'J': b'down',
1131 b'J': b'down',
1133 b'K': b'up',
1132 b'K': b'up',
1134 },
1133 },
1135 MODE_HELP: {},
1134 MODE_HELP: {},
1136 }
1135 }
1137
1136
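# A sketch of how the table above is consumed (the event loop lives further
# down in this file): key lookups combine the current mode's map with the
# b'global' map, so in MODE_RULES 'd' yields b'action-drop' while 'q' still
# resolves to b'quit' from the global bindings.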
1138
1137
def screen_size():
    # 'hh' unpacks two C shorts (rows, cols), so the buffer handed to
    # ioctl() must be exactly four bytes long
    return struct.unpack(b'hh', fcntl.ioctl(1, termios.TIOCGWINSZ, b'    '))
1141
1140
1142
1141
1143 class histeditrule:
1142 class histeditrule:
1144 def __init__(self, ui, ctx, pos, action=b'pick'):
1143 def __init__(self, ui, ctx, pos, action=b'pick'):
1145 self.ui = ui
1144 self.ui = ui
1146 self.ctx = ctx
1145 self.ctx = ctx
1147 self.action = action
1146 self.action = action
1148 self.origpos = pos
1147 self.origpos = pos
1149 self.pos = pos
1148 self.pos = pos
1150 self.conflicts = []
1149 self.conflicts = []
1151
1150
1152 def __bytes__(self):
1151 def __bytes__(self):
1153 # Example display of several histeditrules:
1152 # Example display of several histeditrules:
1154 #
1153 #
1155 # #10 pick 316392:06a16c25c053 add option to skip tests
1154 # #10 pick 316392:06a16c25c053 add option to skip tests
1156 # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED>
1155 # #11 ^roll 316393:71313c964cc5 <RED>oops a fixup commit</RED>
1157 # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
1156 # #12 pick 316394:ab31f3973b0d include mfbt for mozilla-config.h
1158 # #13 ^fold 316395:14ce5803f4c3 fix warnings
1157 # #13 ^fold 316395:14ce5803f4c3 fix warnings
1159 #
1158 #
1160 # The carets point to the changeset being folded into ("roll this
1159 # The carets point to the changeset being folded into ("roll this
1161 # changeset into the changeset above").
1160 # changeset into the changeset above").
1162 return b'%s%s' % (self.prefix, self.desc)
1161 return b'%s%s' % (self.prefix, self.desc)
1163
1162
1164 __str__ = encoding.strmethod(__bytes__)
1163 __str__ = encoding.strmethod(__bytes__)
1165
1164
1166 @property
1165 @property
1167 def prefix(self):
1166 def prefix(self):
1168 # Some actions ('fold' and 'roll') combine a patch with a
1167 # Some actions ('fold' and 'roll') combine a patch with a
1169 # previous one. Add a marker showing which patch they apply
1168 # previous one. Add a marker showing which patch they apply
1170 # to.
1169 # to.
1171 action = ACTION_LABELS.get(self.action, self.action)
1170 action = ACTION_LABELS.get(self.action, self.action)
1172
1171
1173 h = self.ctx.hex()[0:12]
1172 h = self.ctx.hex()[0:12]
1174 r = self.ctx.rev()
1173 r = self.ctx.rev()
1175
1174
1176 return b"#%s %s %d:%s " % (
1175 return b"#%s %s %d:%s " % (
1177 (b'%d' % self.origpos).ljust(2),
1176 (b'%d' % self.origpos).ljust(2),
1178 action.ljust(6),
1177 action.ljust(6),
1179 r,
1178 r,
1180 h,
1179 h,
1181 )
1180 )
1182
1181
1183 @util.propertycache
1182 @util.propertycache
1184 def desc(self):
1183 def desc(self):
1185 summary = cmdutil.rendertemplate(
1184 summary = cmdutil.rendertemplate(
1186 self.ctx, self.ui.config(b'histedit', b'summary-template')
1185 self.ctx, self.ui.config(b'histedit', b'summary-template')
1187 )
1186 )
1188 if summary:
1187 if summary:
1189 return summary
1188 return summary
1190 # This is split off from the prefix property so that we can
1189 # This is split off from the prefix property so that we can
1191 # separately make the description for 'roll' red (since it
1190 # separately make the description for 'roll' red (since it
1192 # will get discarded).
1191 # will get discarded).
1193 return stringutil.firstline(self.ctx.description())
1192 return stringutil.firstline(self.ctx.description())
1194
1193
1195 def checkconflicts(self, other):
1194 def checkconflicts(self, other):
1196 if other.pos > self.pos and other.origpos <= self.origpos:
1195 if other.pos > self.pos and other.origpos <= self.origpos:
1197 if set(other.ctx.files()) & set(self.ctx.files()) != set():
1196 if set(other.ctx.files()) & set(self.ctx.files()) != set():
1198 self.conflicts.append(other)
1197 self.conflicts.append(other)
1199 return self.conflicts
1198 return self.conflicts
1200
1199
1201 if other in self.conflicts:
1200 if other in self.conflicts:
1202 self.conflicts.remove(other)
1201 self.conflicts.remove(other)
1203 return self.conflicts
1202 return self.conflicts
1204
1203
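    # Reading checkconflicts() above: 'other' is recorded as a conflict when
    # the reordering moved it past this rule (other.pos > self.pos although
    # it originally sat at or before it) and the two changesets touch at
    # least one common file -- a cheap warning heuristic, not a merge check.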
1205
1204
def makecommands(rules):
    """Returns a list of commands consumable by histedit --commands based on
    our list of rules"""
    commands = []
    # avoid shadowing the 'rules' argument with the loop variable
    for rule in rules:
        commands.append(b'%s %s\n' % (rule.action, rule.ctx))
    return commands
1213
1212
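# Illustrative output (hashes hypothetical): for a pick and a roll rule this
# returns
#
#   [b'pick 0123abcd4567\n', b'roll 89ab0123cdef\n']
#
# i.e. the same line format that `hg histedit --commands FILE` consumes.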
1214
1213
1215 def addln(win, y, x, line, color=None):
1214 def addln(win, y, x, line, color=None):
1216 """Add a line to the given window left padding but 100% filled with
1215 """Add a line to the given window left padding but 100% filled with
1217 whitespace characters, so that the color appears on the whole line"""
1216 whitespace characters, so that the color appears on the whole line"""
1218 maxy, maxx = win.getmaxyx()
1217 maxy, maxx = win.getmaxyx()
1219 length = maxx - 1 - x
1218 length = maxx - 1 - x
1220 line = bytes(line).ljust(length)[:length]
1219 line = bytes(line).ljust(length)[:length]
1221 if y < 0:
1220 if y < 0:
1222 y = maxy + y
1221 y = maxy + y
1223 if x < 0:
1222 if x < 0:
1224 x = maxx + x
1223 x = maxx + x
1225 if color:
1224 if color:
1226 win.addstr(y, x, line, color)
1225 win.addstr(y, x, line, color)
1227 else:
1226 else:
1228 win.addstr(y, x, line)
1227 win.addstr(y, x, line)
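
# Note on coordinates: addln() accepts negative y/x and wraps them from the
# bottom/right edge, following curses conventions. For example (hypothetical
# 24-row window), addln(win, -1, 0, line) writes to row 23, the last row;
# render_rules() below relies on this to pin its conflict warning to the
# bottom of the window.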


def _trunc_head(line, n):
    if len(line) <= n:
        return line
    return b'> ' + line[-(n - 2) :]


def _trunc_tail(line, n):
    if len(line) <= n:
        return line
    return line[: n - 2] + b' >'
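
# Example (hypothetical input): both helpers cap the result at n bytes and
# spend two of them on a truncation marker:
#
#     >>> _trunc_head(b'abcdefgh', 6)
#     b'> efgh'
#     >>> _trunc_tail(b'abcdefgh', 6)
#     b'abcd >'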


class _chistedit_state:
    def __init__(
        self,
        repo,
        rules,
        stdscr,
    ):
        self.repo = repo
        self.rules = rules
        self.stdscr = stdscr
        self.later_on_top = repo.ui.configbool(
            b'histedit', b'later-commits-first'
        )
        # The current item in display order, initialized to point to the top
        # of the screen.
        self.pos = 0
        self.selected = None
        self.mode = (MODE_INIT, MODE_INIT)
        self.page_height = None
        self.modes = {
            MODE_RULES: {
                b'line_offset': 0,
            },
            MODE_PATCH: {
                b'line_offset': 0,
            },
        }

    def render_commit(self, win):
        """Renders the commit window that shows the log of the current selected
        commit"""
        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]

        ctx = rule.ctx
        win.box()

        maxy, maxx = win.getmaxyx()
        length = maxx - 3

        line = b"changeset: %d:%s" % (ctx.rev(), ctx.hex()[:12])
        win.addstr(1, 1, line[:length])

        line = b"user:      %s" % ctx.user()
        win.addstr(2, 1, line[:length])

        bms = self.repo.nodebookmarks(ctx.node())
        line = b"bookmark:  %s" % b' '.join(bms)
        win.addstr(3, 1, line[:length])

        line = b"summary:   %s" % stringutil.firstline(ctx.description())
        win.addstr(4, 1, line[:length])

        line = b"files:     "
        win.addstr(5, 1, line)
        fnx = 1 + len(line)
        fnmaxx = length - fnx + 1
        y = 5
        fnmaxn = maxy - (1 + y) - 1
        files = ctx.files()
        for i, line1 in enumerate(files):
            if len(files) > fnmaxn and i == fnmaxn - 1:
                win.addstr(y, fnx, _trunc_tail(b','.join(files[i:]), fnmaxx))
                y = y + 1
                break
            win.addstr(y, fnx, _trunc_head(line1, fnmaxx))
            y = y + 1

        conflicts = rule.conflicts
        if len(conflicts) > 0:
            conflictstr = b','.join(map(lambda r: r.ctx.hex()[:12], conflicts))
            conflictstr = b"changed files overlap with %s" % conflictstr
        else:
            conflictstr = b'no overlap'

        win.addstr(y, 1, conflictstr[:length])
        win.noutrefresh()

    def helplines(self):
        if self.mode[0] == MODE_PATCH:
            help = b"""\
?: help, k/up: line up, j/down: line down, v: stop viewing patch
pgup: prev page, space/pgdn: next page, c: commit, q: abort
"""
        else:
            help = b"""\
?: help, k/up: move up, j/down: move down, space: select, v: view patch
d: drop, e: edit, f: fold, m: mess, p: pick, r: roll
pgup/K: move patch up, pgdn/J: move patch down, c: commit, q: abort
"""
        if self.later_on_top:
            help += b"Newer commits are shown above older commits.\n"
        else:
            help += b"Older commits are shown above newer commits.\n"
        return help.splitlines()

    def render_help(self, win):
        maxy, maxx = win.getmaxyx()
        for y, line in enumerate(self.helplines()):
            if y >= maxy:
                break
            addln(win, y, 0, line, curses.color_pair(COLOR_HELP))
        win.noutrefresh()

    def layout(self):
        maxy, maxx = self.stdscr.getmaxyx()
        helplen = len(self.helplines())
        mainlen = maxy - helplen - 12
        if mainlen < 1:
            raise error.Abort(
                _(b"terminal dimensions %d by %d too small for curses histedit")
                % (maxy, maxx),
                hint=_(
                    b"enlarge your terminal or use --config ui.interface=text"
                ),
            )
        return {
            b'commit': (12, maxx),
            b'help': (helplen, maxx),
            b'main': (mainlen, maxx),
        }

    def display_pos_to_rule_pos(self, display_pos):
        """Converts a position in display order to rule order.

        The `display_pos` is the order from the top in display order, not
        considering which items are currently visible on the screen. Thus,
        `display_pos=0` is the item at the top (possibly after scrolling to
        the top).
        """
        if self.later_on_top:
            return len(self.rules) - 1 - display_pos
        else:
            return display_pos
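
    # Example (hypothetical rule count): with five rules and
    # later-commits-first enabled, display position 0 (the top row) maps to
    # rule position 4 and display position 4 maps to rule position 0; with
    # the option off, the mapping is the identity.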

    def render_rules(self, rulesscr):
        start = self.modes[MODE_RULES][b'line_offset']

        conflicts = [r.ctx for r in self.rules if r.conflicts]
        if len(conflicts) > 0:
            line = b"potential conflict in %s" % b','.join(
                map(pycompat.bytestr, conflicts)
            )
            addln(rulesscr, -1, 0, line, curses.color_pair(COLOR_WARN))

        for display_pos in range(start, len(self.rules)):
            y = display_pos - start
            if y < 0 or y >= self.page_height:
                continue
            rule_pos = self.display_pos_to_rule_pos(display_pos)
            rule = self.rules[rule_pos]
            if len(rule.conflicts) > 0:
                rulesscr.addstr(y, 0, b" ", curses.color_pair(COLOR_WARN))
            else:
                rulesscr.addstr(y, 0, b" ", curses.COLOR_BLACK)

            if display_pos == self.selected:
                rollcolor = COLOR_ROLL_SELECTED
                addln(rulesscr, y, 2, rule, curses.color_pair(COLOR_SELECTED))
            elif display_pos == self.pos:
                rollcolor = COLOR_ROLL_CURRENT
                addln(
                    rulesscr,
                    y,
                    2,
                    rule,
                    curses.color_pair(COLOR_CURRENT) | curses.A_BOLD,
                )
            else:
                rollcolor = COLOR_ROLL
                addln(rulesscr, y, 2, rule)

            if rule.action == b'roll':
                rulesscr.addstr(
                    y,
                    2 + len(rule.prefix),
                    rule.desc,
                    curses.color_pair(rollcolor),
                )

        rulesscr.noutrefresh()

    def render_string(self, win, output, diffcolors=False):
        maxy, maxx = win.getmaxyx()
        length = min(maxy - 1, len(output))
        for y in range(0, length):
            line = output[y]
            if diffcolors:
                if line.startswith(b'+'):
                    win.addstr(
                        y, 0, line, curses.color_pair(COLOR_DIFF_ADD_LINE)
                    )
                elif line.startswith(b'-'):
                    win.addstr(
                        y, 0, line, curses.color_pair(COLOR_DIFF_DEL_LINE)
                    )
                elif line.startswith(b'@@ '):
                    win.addstr(y, 0, line, curses.color_pair(COLOR_DIFF_OFFSET))
                else:
                    win.addstr(y, 0, line)
            else:
                win.addstr(y, 0, line)
        win.noutrefresh()
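
    # With diffcolors=True, lines are classified by their leading bytes, as
    # in a unified diff: b'+...' renders with COLOR_DIFF_ADD_LINE (green),
    # b'-...' with COLOR_DIFF_DEL_LINE (red), b'@@ ...' hunk headers with
    # COLOR_DIFF_OFFSET (magenta), and anything else in the default color.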

    def render_patch(self, win):
        start = self.modes[MODE_PATCH][b'line_offset']
        content = self.modes[MODE_PATCH][b'patchcontents']
        self.render_string(win, content[start:], diffcolors=True)

    def event(self, ch):
        """Change state based on the current character input

        This takes the current state and, based on the character the user
        typed, transitions to the next state.
        """
        oldpos = self.pos

        if ch in (curses.KEY_RESIZE, b"KEY_RESIZE"):
            return E_RESIZE

        lookup_ch = ch
        if ch is not None and b'0' <= ch <= b'9':
            lookup_ch = b'0'

        curmode, prevmode = self.mode
        action = KEYTABLE[curmode].get(
            lookup_ch, KEYTABLE[b'global'].get(lookup_ch)
        )
        if action is None:
            return
        if action in (b'down', b'move-down'):
            newpos = min(oldpos + 1, len(self.rules) - 1)
            self.move_cursor(oldpos, newpos)
            if self.selected is not None or action == b'move-down':
                self.swap(oldpos, newpos)
        elif action in (b'up', b'move-up'):
            newpos = max(0, oldpos - 1)
            self.move_cursor(oldpos, newpos)
            if self.selected is not None or action == b'move-up':
                self.swap(oldpos, newpos)
        elif action == b'next-action':
            self.cycle_action(oldpos, next=True)
        elif action == b'prev-action':
            self.cycle_action(oldpos, next=False)
        elif action == b'select':
            self.selected = oldpos if self.selected is None else None
            self.make_selection(self.selected)
        elif action == b'goto' and int(ch) < len(self.rules) <= 10:
            newrule = next((r for r in self.rules if r.origpos == int(ch)))
            self.move_cursor(oldpos, newrule.pos)
            if self.selected is not None:
                self.swap(oldpos, newrule.pos)
        elif action.startswith(b'action-'):
            self.change_action(oldpos, action[7:])
        elif action == b'showpatch':
            self.change_mode(MODE_PATCH if curmode != MODE_PATCH else prevmode)
        elif action == b'help':
            self.change_mode(MODE_HELP if curmode != MODE_HELP else prevmode)
        elif action == b'quit':
            return E_QUIT
        elif action == b'histedit':
            return E_HISTEDIT
        elif action == b'page-down':
            return E_PAGEDOWN
        elif action == b'page-up':
            return E_PAGEUP
        elif action == b'line-down':
            return E_LINEDOWN
        elif action == b'line-up':
            return E_LINEUP

    def patch_contents(self):
        repo = self.repo
        rule = self.rules[self.display_pos_to_rule_pos(self.pos)]
        displayer = logcmdutil.changesetdisplayer(
            repo.ui,
            repo,
            {b"patch": True, b"template": b"status"},
            buffered=True,
        )
        overrides = {(b'ui', b'verbose'): True}
        with repo.ui.configoverride(overrides, source=b'histedit'):
            displayer.show(rule.ctx)
            displayer.close()
        return displayer.hunk[rule.ctx.rev()].splitlines()

    def move_cursor(self, oldpos, newpos):
        """Change the rule/changeset that the cursor is pointing to, regardless of
        current mode (you can switch between patches from the view patch window)."""
        self.pos = newpos

        mode, _ = self.mode
        if mode == MODE_RULES:
            # Scroll through the list by updating the view for MODE_RULES, so that
            # even if we are not currently viewing the rules, switching back will
            # result in the cursor's rule being visible.
            modestate = self.modes[MODE_RULES]
            if newpos < modestate[b'line_offset']:
                modestate[b'line_offset'] = newpos
            elif newpos > modestate[b'line_offset'] + self.page_height - 1:
                modestate[b'line_offset'] = newpos - self.page_height + 1

        # Reset the patch view region to the top of the new patch.
        self.modes[MODE_PATCH][b'line_offset'] = 0

    def change_mode(self, mode):
        curmode, _ = self.mode
        self.mode = (mode, curmode)
        if mode == MODE_PATCH:
            self.modes[MODE_PATCH][b'patchcontents'] = self.patch_contents()

    def make_selection(self, pos):
        self.selected = pos

    def swap(self, oldpos, newpos):
        """Swap two positions and calculate necessary conflicts in
        O(|newpos-oldpos|) time"""
        old_rule_pos = self.display_pos_to_rule_pos(oldpos)
        new_rule_pos = self.display_pos_to_rule_pos(newpos)

        rules = self.rules
        assert 0 <= old_rule_pos < len(rules) and 0 <= new_rule_pos < len(rules)

        rules[old_rule_pos], rules[new_rule_pos] = (
            rules[new_rule_pos],
            rules[old_rule_pos],
        )

        # TODO: swap should not know about histeditrule's internals
        rules[new_rule_pos].pos = new_rule_pos
        rules[old_rule_pos].pos = old_rule_pos

        start = min(old_rule_pos, new_rule_pos)
        end = max(old_rule_pos, new_rule_pos)
        for r in range(start, end + 1):
            rules[new_rule_pos].checkconflicts(rules[r])
            rules[old_rule_pos].checkconflicts(rules[r])

        if self.selected:
            self.make_selection(newpos)
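
    # Example (hypothetical indices): swapping display positions 2 and 3
    # exchanges the two rules and then re-runs checkconflicts() for every
    # rule position in the inclusive range [2, 3] against both moved rules,
    # which is what keeps the cost proportional to |newpos - oldpos|.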

    def change_action(self, pos, action):
        """Change the action state on the given position to the new action"""
        assert 0 <= pos < len(self.rules)
        self.rules[pos].action = action

    def cycle_action(self, pos, next=False):
        """Changes the action state to the next or the previous action from
        the action list"""
        assert 0 <= pos < len(self.rules)
        current = self.rules[pos].action

        assert current in KEY_LIST

        index = KEY_LIST.index(current)
        if next:
            index += 1
        else:
            index -= 1
        self.change_action(pos, KEY_LIST[index % len(KEY_LIST)])
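
    # Example: the modulo wraps the cycle at both ends. Assuming KEY_LIST is
    # the action list defined earlier in this module (e.g. starting with
    # b'pick' and ending with b'roll'), cycling forward from the last entry
    # lands on the first (len(KEY_LIST) % len(KEY_LIST) == 0) and cycling
    # backward from the first lands on the last ((0 - 1) % len(KEY_LIST)
    # == len(KEY_LIST) - 1).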

    def change_view(self, delta, unit):
        """Change the region of whatever is being viewed (a patch or the list of
        changesets). 'delta' is an amount (+/- 1) and 'unit' is 'page' or 'line'."""
        mode, _ = self.mode
        if mode != MODE_PATCH:
            return
        mode_state = self.modes[mode]
        num_lines = len(mode_state[b'patchcontents'])
        page_height = self.page_height
        unit = page_height if unit == b'page' else 1
        num_pages = 1 + (num_lines - 1) // page_height
        max_offset = (num_pages - 1) * page_height
        newline = mode_state[b'line_offset'] + delta * unit
        mode_state[b'line_offset'] = max(0, min(max_offset, newline))
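
    # Worked example (hypothetical sizes): with a 25-line patch and a page
    # height of 10, num_pages is 1 + 24 // 10 == 3 and max_offset is 20, so
    # the line offset is always clamped to the range [0, 20] no matter how
    # far the user pages past either end.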


def _chisteditmain(repo, rules, stdscr):
    try:
        curses.use_default_colors()
    except curses.error:
        pass

    # initialize color pattern
    curses.init_pair(COLOR_HELP, curses.COLOR_WHITE, curses.COLOR_BLUE)
    curses.init_pair(COLOR_SELECTED, curses.COLOR_BLACK, curses.COLOR_WHITE)
    curses.init_pair(COLOR_WARN, curses.COLOR_BLACK, curses.COLOR_YELLOW)
    curses.init_pair(COLOR_OK, curses.COLOR_BLACK, curses.COLOR_GREEN)
    curses.init_pair(COLOR_CURRENT, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
    curses.init_pair(COLOR_DIFF_ADD_LINE, curses.COLOR_GREEN, -1)
    curses.init_pair(COLOR_DIFF_DEL_LINE, curses.COLOR_RED, -1)
    curses.init_pair(COLOR_DIFF_OFFSET, curses.COLOR_MAGENTA, -1)
    curses.init_pair(COLOR_ROLL, curses.COLOR_RED, -1)
    curses.init_pair(
        COLOR_ROLL_CURRENT, curses.COLOR_BLACK, curses.COLOR_MAGENTA
    )
    curses.init_pair(COLOR_ROLL_SELECTED, curses.COLOR_RED, curses.COLOR_WHITE)

    # don't display the cursor
    try:
        curses.curs_set(0)
    except curses.error:
        pass

    def drawvertwin(size, y, x):
        win = curses.newwin(size[0], size[1], y, x)
        y += size[0]
        return win, y, x

    state = _chistedit_state(repo, rules, stdscr)

    # event loop
    ch = None
    stdscr.clear()
    stdscr.refresh()
    while True:
        oldmode, unused = state.mode
        if oldmode == MODE_INIT:
            state.change_mode(MODE_RULES)
        e = state.event(ch)

        if e == E_QUIT:
            return False
        if e == E_HISTEDIT:
            return state.rules
        else:
            if e == E_RESIZE:
                size = screen_size()
                if size != stdscr.getmaxyx():
                    curses.resizeterm(*size)

            sizes = state.layout()
            curmode, unused = state.mode
            if curmode != oldmode:
                state.page_height = sizes[b'main'][0]
                # Adjust the view to fit the current screen size.
                state.move_cursor(state.pos, state.pos)

            # Pack the windows against the top, each pane spread across the
            # full width of the screen.
            y, x = (0, 0)
            helpwin, y, x = drawvertwin(sizes[b'help'], y, x)
            mainwin, y, x = drawvertwin(sizes[b'main'], y, x)
            commitwin, y, x = drawvertwin(sizes[b'commit'], y, x)

            if e in (E_PAGEDOWN, E_PAGEUP, E_LINEDOWN, E_LINEUP):
                if e == E_PAGEDOWN:
                    state.change_view(+1, b'page')
                elif e == E_PAGEUP:
                    state.change_view(-1, b'page')
                elif e == E_LINEDOWN:
                    state.change_view(+1, b'line')
                elif e == E_LINEUP:
                    state.change_view(-1, b'line')

            # start rendering
            commitwin.erase()
            helpwin.erase()
            mainwin.erase()
            if curmode == MODE_PATCH:
                state.render_patch(mainwin)
            elif curmode == MODE_HELP:
                state.render_string(mainwin, __doc__.strip().splitlines())
            else:
                state.render_rules(mainwin)
                state.render_commit(commitwin)
            state.render_help(helpwin)
            curses.doupdate()
            # done rendering
            ch = encoding.strtolocal(stdscr.getkey())


def _chistedit(ui, repo, freeargs, opts):
    """interactively edit changeset history via a curses interface

    Provides an ncurses interface to histedit. Press ? in chistedit mode
    to see an extensive help. Requires python-curses to be installed."""

    if curses is None:
        raise error.Abort(_(b"Python curses library required"))

    # disable color
    ui._colormode = None

    try:
        keep = opts.get(b'keep')
        revs = opts.get(b'rev', [])[:]
        cmdutil.checkunfinished(repo)
        cmdutil.bailifchanged(repo)

        revs.extend(freeargs)
        if not revs:
            defaultrev = destutil.desthistedit(ui, repo)
            if defaultrev is not None:
                revs.append(defaultrev)
        if len(revs) != 1:
            raise error.InputError(
                _(b'histedit requires exactly one ancestor revision')
            )

        rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.InputError(
                _(
                    b'The specified revisions must have '
                    b'exactly one common root'
                )
            )
        root = rr[0].node()

        topmost = repo.dirstate.p1()
        revs = between(repo, root, topmost, keep)
        if not revs:
            raise error.InputError(
                _(b'%s is not an ancestor of working directory') % short(root)
            )

        rules = []
        for i, r in enumerate(revs):
            rules.append(histeditrule(ui, repo[r], i))
        with util.with_lc_ctype():
            rc = curses.wrapper(functools.partial(_chisteditmain, repo, rules))
        curses.echo()
        curses.endwin()
        if rc is False:
            ui.write(_(b"histedit aborted\n"))
            return 0
        if type(rc) is list:
            ui.status(_(b"performing changes\n"))
            rules = makecommands(rc)
            with repo.vfs(b'chistedit', b'w+') as fp:
                for r in rules:
                    fp.write(r)
                opts[b'commands'] = fp.name
            return _texthistedit(ui, repo, freeargs, opts)
    except KeyboardInterrupt:
        pass
    return -1
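
# On success the curses session returns the edited rule list; the code above
# serializes it with makecommands() into the repository-private file
# .hg/chistedit (one "action shorthash" line per rule, e.g. a line such as
# b'pick 5339bf82f0ca\n' -- the hash here is illustrative) and then re-enters
# the regular text-based flow via the --commands option.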


@command(
    b'histedit',
    [
        (
            b'',
            b'commands',
            b'',
            _(b'read history edits from the specified file'),
            _(b'FILE'),
        ),
        (b'c', b'continue', False, _(b'continue an edit already in progress')),
        (b'', b'edit-plan', False, _(b'edit remaining actions list')),
        (
            b'k',
            b'keep',
            False,
            _(b"don't strip old nodes after edit is complete"),
        ),
        (b'', b'abort', False, _(b'abort an edit in progress')),
        (b'o', b'outgoing', False, _(b'changesets not found in destination')),
        (
            b'f',
            b'force',
            False,
            _(b'force outgoing even for unrelated repositories'),
        ),
        (b'r', b'rev', [], _(b'first revision to be edited'), _(b'REV')),
    ]
    + cmdutil.formatteropts,
    _(b"[OPTIONS] ([ANCESTOR] | --outgoing [URL])"),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command lets you edit a linear series of changesets (up to
    and including the working directory, which should be clean).
    You can:

    - `pick` to [re]order a changeset

    - `drop` to omit a changeset

    - `mess` to reword the changeset commit message

    - `fold` to combine it with the preceding changeset (using the later date)

    - `roll` like fold, but discarding this commit's description and date

    - `edit` to edit this changeset (preserving date)

    - `base` to checkout changeset and apply further changesets from there

    There are a number of ways to select the root changeset:

    - Specify ANCESTOR directly

    - Use --outgoing -- it will be the first linear changeset not
      included in destination. (See :hg:`help config.paths.default-push`)

    - Otherwise, the value from the "histedit.defaultrev" config option
      is used as a revset to select the base revision when ANCESTOR is not
      specified. The first revision returned by the revset is used. By
      default, this selects the editable history that is unique to the
      ancestry of the working directory.

    .. container:: verbose

       If you use --outgoing, this command will abort if there are ambiguous
       outgoing revisions. For example, if there are multiple branches
       containing outgoing revisions.

       Use "min(outgoing() and ::.)" or a similar revset specification
       instead of --outgoing to specify the edit target revision exactly in
       such ambiguous situations. See :hg:`help revsets` for details about
       selecting revisions.

    .. container:: verbose

       Examples:

         - A number of changes have been made.
           Revision 3 is no longer needed.

           Start history editing from revision 3::

             hg histedit -r 3

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

           Additional information about the possible actions
           to take appears below the list of revisions.

           To remove revision 3 from the history,
           its action (at the beginning of the relevant line)
           is changed to 'drop'::

             drop 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

         - A number of changes have been made.
           Revisions 2 and 4 need to be swapped.

           Start history editing from revision 2::

             hg histedit -r 2

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 252a1af424ad 2 Blorb a morgwazzle
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog

           To swap revisions 2 and 4, their lines are swapped
           in the editor::

             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 252a1af424ad 2 Blorb a morgwazzle

    Returns 0 on success, 1 if user intervention is required (not only
    for intentional "edit" command, but also for resolving unexpected
    conflicts).
    """
    opts = pycompat.byteskwargs(opts)

    # kludge: _chistedit only works for starting an edit, not aborting
    # or continuing, so fall back to regular _texthistedit for those
    # operations.
    if ui.interface(b'histedit') == b'curses' and _getgoal(opts) == goalnew:
        return _chistedit(ui, repo, freeargs, opts)
    return _texthistedit(ui, repo, freeargs, opts)


def _texthistedit(ui, repo, freeargs, opts):
    state = histeditstate(repo)
    with repo.wlock() as wlock, repo.lock() as lock:
        state.wlock = wlock
        state.lock = lock
        _histedit(ui, repo, state, freeargs, opts)


goalcontinue = b'continue'
goalabort = b'abort'
goaleditplan = b'edit-plan'
goalnew = b'new'


def _getgoal(opts):
    if opts.get(b'continue'):
        return goalcontinue
    if opts.get(b'abort'):
        return goalabort
    if opts.get(b'edit_plan'):
        return goaleditplan
    return goalnew
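
# Example: the goal is derived from mutually exclusive flags, checked in a
# fixed priority order. With opts == {b'continue': True} this returns
# goalcontinue; with no recognized flag set it falls through to goalnew,
# which is the only goal the curses front end above handles directly.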


def _readfile(ui, path):
    if path == b'-':
        with ui.timeblockedsection(b'histedit'):
            return ui.fin.read()
    else:
        with open(path, b'rb') as f:
            return f.read()


def _validateargs(ui, repo, freeargs, opts, goal, rules, revs):
    # TODO only abort if we try to histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.StateError(_(b'source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get(b'outgoing')
    editplan = opts.get(b'edit_plan')
    abort = opts.get(b'abort')
    force = opts.get(b'force')
    if force and not outg:
        raise error.InputError(_(b'--force only allowed with --outgoing'))
    if goal == b'continue':
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --continue'))
    elif goal == b'abort':
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.InputError(_(b'no arguments allowed with --abort'))
    elif goal == b'edit-plan':
        if any((outg, revs, freeargs)):
            raise error.InputError(
                _(b'only --commands argument allowed with --edit-plan')
            )
    else:
        if outg:
            if revs:
                raise error.InputError(
                    _(b'no revisions allowed with --outgoing')
                )
            if len(freeargs) > 1:
                raise error.InputError(
                    _(b'only one repo argument allowed with --outgoing')
                )
        else:
            revs.extend(freeargs)
            if len(revs) == 0:
                defaultrev = destutil.desthistedit(ui, repo)
                if defaultrev is not None:
                    revs.append(defaultrev)

            if len(revs) != 1:
                raise error.InputError(
                    _(b'histedit requires exactly one ancestor revision')
                )


def _histedit(ui, repo, state, freeargs, opts):
    fm = ui.formatter(b'histedit', opts)
    fm.startitem()
    goal = _getgoal(opts)
    revs = opts.get(b'rev', [])
    nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
    rules = opts.get(b'commands', b'')
    state.keep = opts.get(b'keep', False)

    _validateargs(ui, repo, freeargs, opts, goal, rules, revs)

    hastags = False
    if revs:
        revs = logcmdutil.revrange(repo, revs)
        ctxs = [repo[rev] for rev in revs]
        for ctx in ctxs:
            tags = [tag for tag in ctx.tags() if tag != b'tip']
            if not hastags:
                hastags = len(tags)
    if hastags:
        if ui.promptchoice(
            _(
                b'warning: tags associated with the given'
                b' changeset will be lost after histedit.\n'
                b'do you want to continue (yN)? $$ &Yes $$ &No'
            ),
            default=1,
        ):
            raise error.CanceledError(_(b'histedit cancelled\n'))
    # rebuild state
    if goal == goalcontinue:
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == goaleditplan:
        _edithisteditplan(ui, repo, state, rules)
        return
    elif goal == goalabort:
        _aborthistedit(ui, repo, state, nobackup=nobackup)
        return
    else:
        # goal == goalnew
        _newhistedit(ui, repo, state, revs, freeargs, opts)

    _continuehistedit(ui, repo, state)
    _finishhistedit(ui, repo, state, fm)
    fm.end()


def _continuehistedit(ui, repo, state):
    """This function runs after either:
    - bootstrapcontinue (if the goal is 'continue')
    - _newhistedit (if the goal is 'new')
    """
    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor
    actions = state.actions[:]
    for idx, (action, nextact) in enumerate(zip(actions, actions[1:] + [None])):
        if action.verb == b'fold' and nextact and nextact.verb == b'fold':
            state.actions[idx].__class__ = _multifold

    # Force an initial state file write, so the user can run --abort/continue
    # even if there's an exception before the first transaction serialize.
    state.write()

    tr = None
    # Don't use singletransaction by default since it rolls the entire
    # transaction back if an unexpected exception happens (like a
    # pretxncommit hook throws, or the user aborts the commit msg editor).
    if ui.configbool(b"histedit", b"singletransaction"):
        # Don't use a 'with' for the transaction, since actions may close
        # and reopen a transaction. For example, if the action executes an
        # external process it may choose to commit the transaction first.
        tr = repo.transaction(b'histedit')
    progress = ui.makeprogress(
        _(b"editing"), unit=_(b'changes'), total=len(state.actions)
    )
    with progress, util.acceptintervention(tr):
        while state.actions:
            state.write(tr=tr)
            actobj = state.actions[0]
            progress.increment(item=actobj.torule())
            ui.debug(
                b'histedit: processing %s %s\n' % (actobj.verb, actobj.torule())
            )
            parentctx, replacement_ = actobj.run()
            state.parentctxnode = parentctx.node()
            state.replacements.extend(replacement_)
            state.actions.pop(0)

    state.write()


def _finishhistedit(ui, repo, state, fm):
    """This action runs when histedit is finishing its session"""
    mergemod.update(repo[state.parentctxnode])

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.items():
            if not succs:
                ui.debug(b'histedit: %s is dropped\n' % short(prec))
            else:
                ui.debug(
                    b'histedit: %s is replaced by %s\n'
                    % (short(prec), short(succs[0]))
                )
                if len(succs) > 1:
                    m = b'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % short(n))

    if not state.keep:
        if mapping:
            movetopmostbookmarks(repo, state.topmost, ntm)
            # TODO update mq state
    else:
        mapping = {}

    for n in tmpnodes:
        if n in repo:
            mapping[n] = ()

    # remove entries about unknown nodes
    has_node = repo.unfiltered().changelog.index.has_node
    mapping = {
        k: v
        for k, v in mapping.items()
        if has_node(k) and all(has_node(n) for n in v)
    }
    scmutil.cleanupnodes(repo, mapping, b'histedit')
    hf = fm.hexfunc
    fl = fm.formatlist
    fd = fm.formatdict
    nodechanges = fd(
        {
            hf(oldn): fl([hf(n) for n in newn], name=b'node')
            for oldn, newn in mapping.items()
        },
        key=b"oldnode",
        value=b"newnodes",
    )
    fm.data(nodechanges=nodechanges)

    state.clear()
    if os.path.exists(repo.sjoin(b'undo')):
        os.unlink(repo.sjoin(b'undo'))
    if repo.vfs.exists(b'histedit-last-edit.txt'):
        repo.vfs.unlink(b'histedit-last-edit.txt')
2152
2151
2153
2152
2154 def _aborthistedit(ui, repo, state, nobackup=False):
2153 def _aborthistedit(ui, repo, state, nobackup=False):
2155 try:
2154 try:
2156 state.read()
2155 state.read()
2157 __, leafs, tmpnodes, __ = processreplacement(state)
2156 __, leafs, tmpnodes, __ = processreplacement(state)
2158 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2157 ui.debug(b'restore wc to old parent %s\n' % short(state.topmost))
2159
2158
2160 # Recover our old commits if necessary
2159 # Recover our old commits if necessary
2161 if not state.topmost in repo and state.backupfile:
2160 if not state.topmost in repo and state.backupfile:
2162 backupfile = repo.vfs.join(state.backupfile)
2161 backupfile = repo.vfs.join(state.backupfile)
2163 f = hg.openpath(ui, backupfile)
2162 f = hg.openpath(ui, backupfile)
2164 gen = exchange.readbundle(ui, f, backupfile)
2163 gen = exchange.readbundle(ui, f, backupfile)
2165 with repo.transaction(b'histedit.abort') as tr:
2164 with repo.transaction(b'histedit.abort') as tr:
2166 bundle2.applybundle(
2165 bundle2.applybundle(
2167 repo,
2166 repo,
2168 gen,
2167 gen,
2169 tr,
2168 tr,
2170 source=b'histedit',
2169 source=b'histedit',
2171 url=b'bundle:' + backupfile,
2170 url=b'bundle:' + backupfile,
2172 )
2171 )
2173
2172
2174 os.remove(backupfile)
2173 os.remove(backupfile)
2175
2174
2176 # check whether we should update away
2175 # check whether we should update away
2177 if repo.unfiltered().revs(
2176 if repo.unfiltered().revs(
2178 b'parents() and (%n or %ln::)',
2177 b'parents() and (%n or %ln::)',
2179 state.parentctxnode,
2178 state.parentctxnode,
2180 leafs | tmpnodes,
2179 leafs | tmpnodes,
2181 ):
2180 ):
2182 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2181 hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
2183 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2182 cleanupnode(ui, repo, tmpnodes, nobackup=nobackup)
2184 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2183 cleanupnode(ui, repo, leafs, nobackup=nobackup)
2185 except Exception:
2184 except Exception:
2186 if state.inprogress():
2185 if state.inprogress():
2187 ui.warn(
2186 ui.warn(
2188 _(
2187 _(
2189 b'warning: encountered an exception during histedit '
2188 b'warning: encountered an exception during histedit '
2190 b'--abort; the repository may not have been completely '
2189 b'--abort; the repository may not have been completely '
2191 b'cleaned up\n'
2190 b'cleaned up\n'
2192 )
2191 )
2193 )
2192 )
2194 raise
2193 raise
2195 finally:
2194 finally:
2196 state.clear()
2195 state.clear()
2197
2196
2198
2197
2199 def hgaborthistedit(ui, repo):
2198 def hgaborthistedit(ui, repo):
2200 state = histeditstate(repo)
2199 state = histeditstate(repo)
2201 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2200 nobackup = not ui.configbool(b'rewrite', b'backup-bundle')
2202 with repo.wlock() as wlock, repo.lock() as lock:
2201 with repo.wlock() as wlock, repo.lock() as lock:
2203 state.wlock = wlock
2202 state.wlock = wlock
2204 state.lock = lock
2203 state.lock = lock
2205 _aborthistedit(ui, repo, state, nobackup=nobackup)
2204 _aborthistedit(ui, repo, state, nobackup=nobackup)
2206
2205
2207
2206
2208 def _edithisteditplan(ui, repo, state, rules):
2207 def _edithisteditplan(ui, repo, state, rules):
2209 state.read()
2208 state.read()
2210 if not rules:
2209 if not rules:
2211 comment = geteditcomment(
2210 comment = geteditcomment(
2212 ui, short(state.parentctxnode), short(state.topmost)
2211 ui, short(state.parentctxnode), short(state.topmost)
2213 )
2212 )
2214 rules = ruleeditor(repo, ui, state.actions, comment)
2213 rules = ruleeditor(repo, ui, state.actions, comment)
2215 else:
2214 else:
2216 rules = _readfile(ui, rules)
2215 rules = _readfile(ui, rules)
2217 actions = parserules(rules, state)
2216 actions = parserules(rules, state)
2218 ctxs = [repo[act.node] for act in state.actions if act.node]
2217 ctxs = [repo[act.node] for act in state.actions if act.node]
2219 warnverifyactions(ui, repo, actions, state, ctxs)
2218 warnverifyactions(ui, repo, actions, state, ctxs)
2220 state.actions = actions
2219 state.actions = actions
2221 state.write()
2220 state.write()
2222
2221
2223
2222
2224 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2223 def _newhistedit(ui, repo, state, revs, freeargs, opts):
2225 outg = opts.get(b'outgoing')
2224 outg = opts.get(b'outgoing')
2226 rules = opts.get(b'commands', b'')
2225 rules = opts.get(b'commands', b'')
2227 force = opts.get(b'force')
2226 force = opts.get(b'force')
2228
2227
2229 cmdutil.checkunfinished(repo)
2228 cmdutil.checkunfinished(repo)
2230 cmdutil.bailifchanged(repo)
2229 cmdutil.bailifchanged(repo)
2231
2230
2232 topmost = repo.dirstate.p1()
2231 topmost = repo.dirstate.p1()
2233 if outg:
2232 if outg:
2234 if freeargs:
2233 if freeargs:
2235 remote = freeargs[0]
2234 remote = freeargs[0]
2236 else:
2235 else:
2237 remote = None
2236 remote = None
2238 root = findoutgoing(ui, repo, remote, force, opts)
2237 root = findoutgoing(ui, repo, remote, force, opts)
2239 else:
2238 else:
2240 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
2239 rr = list(repo.set(b'roots(%ld)', logcmdutil.revrange(repo, revs)))
2241 if len(rr) != 1:
2240 if len(rr) != 1:
2242 raise error.InputError(
2241 raise error.InputError(
2243 _(
2242 _(
2244 b'The specified revisions must have '
2243 b'The specified revisions must have '
2245 b'exactly one common root'
2244 b'exactly one common root'
2246 )
2245 )
2247 )
2246 )
2248 root = rr[0].node()
2247 root = rr[0].node()
2249
2248
2250 revs = between(repo, root, topmost, state.keep)
2249 revs = between(repo, root, topmost, state.keep)
2251 if not revs:
2250 if not revs:
2252 raise error.InputError(
2251 raise error.InputError(
2253 _(b'%s is not an ancestor of working directory') % short(root)
2252 _(b'%s is not an ancestor of working directory') % short(root)
2254 )
2253 )
2255
2254
2256 ctxs = [repo[r] for r in revs]
2255 ctxs = [repo[r] for r in revs]
2257
2256
2258 wctx = repo[None]
2257 wctx = repo[None]
2259 # Please don't ask me why `ancestors` is this value. I figured it
2258 # Please don't ask me why `ancestors` is this value. I figured it
2260 # out with print-debugging, not by actually understanding what the
2259 # out with print-debugging, not by actually understanding what the
2261 # merge code is doing. :(
2260 # merge code is doing. :(
2262 ancs = [repo[b'.']]
2261 ancs = [repo[b'.']]
2263 # Sniff-test to make sure we won't collide with untracked files in
2262 # Sniff-test to make sure we won't collide with untracked files in
2264 # the working directory. If we don't do this, we can get a
2263 # the working directory. If we don't do this, we can get a
2265 # collision after we've started histedit and backing out gets ugly
2264 # collision after we've started histedit and backing out gets ugly
2266 # for everyone, especially the user.
2265 # for everyone, especially the user.
2267 for c in [ctxs[0].p1()] + ctxs:
2266 for c in [ctxs[0].p1()] + ctxs:
2268 try:
2267 try:
2269 mergemod.calculateupdates(
2268 mergemod.calculateupdates(
2270 repo,
2269 repo,
2271 wctx,
2270 wctx,
2272 c,
2271 c,
2273 ancs,
2272 ancs,
2274 # These parameters were determined by print-debugging
2273 # These parameters were determined by print-debugging
2275 # what happens later on inside histedit.
2274 # what happens later on inside histedit.
2276 branchmerge=False,
2275 branchmerge=False,
2277 force=False,
2276 force=False,
2278 acceptremote=False,
2277 acceptremote=False,
2279 followcopies=False,
2278 followcopies=False,
2280 )
2279 )
2281 except error.Abort:
2280 except error.Abort:
2282 raise error.StateError(
2281 raise error.StateError(
2283 _(
2282 _(
2284 b"untracked files in working directory conflict with files in %s"
2283 b"untracked files in working directory conflict with files in %s"
2285 )
2284 )
2286 % c
2285 % c
2287 )
2286 )
2288
2287
2289 if not rules:
2288 if not rules:
2290 comment = geteditcomment(ui, short(root), short(topmost))
2289 comment = geteditcomment(ui, short(root), short(topmost))
2291 actions = [pick(state, r) for r in revs]
2290 actions = [pick(state, r) for r in revs]
2292 rules = ruleeditor(repo, ui, actions, comment)
2291 rules = ruleeditor(repo, ui, actions, comment)
2293 else:
2292 else:
2294 rules = _readfile(ui, rules)
2293 rules = _readfile(ui, rules)
2295 actions = parserules(rules, state)
2294 actions = parserules(rules, state)
2296 warnverifyactions(ui, repo, actions, state, ctxs)
2295 warnverifyactions(ui, repo, actions, state, ctxs)
2297
2296
2298 parentctxnode = repo[root].p1().node()
2297 parentctxnode = repo[root].p1().node()
2299
2298
2300 state.parentctxnode = parentctxnode
2299 state.parentctxnode = parentctxnode
2301 state.actions = actions
2300 state.actions = actions
2302 state.topmost = topmost
2301 state.topmost = topmost
2303 state.replacements = []
2302 state.replacements = []
2304
2303
2305 ui.log(
2304 ui.log(
2306 b"histedit",
2305 b"histedit",
2307 b"%d actions to histedit\n",
2306 b"%d actions to histedit\n",
2308 len(actions),
2307 len(actions),
2309 histedit_num_actions=len(actions),
2308 histedit_num_actions=len(actions),
2310 )
2309 )
2311
2310
2312 # Create a backup so we can always abort completely.
2311 # Create a backup so we can always abort completely.
2313 backupfile = None
2312 backupfile = None
2314 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2313 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2315 backupfile = repair.backupbundle(
2314 backupfile = repair.backupbundle(
2316 repo, [parentctxnode], [topmost], root, b'histedit'
2315 repo, [parentctxnode], [topmost], root, b'histedit'
2317 )
2316 )
2318 state.backupfile = backupfile
2317 state.backupfile = backupfile
2319
2318
2320
2319
2321 def _getsummary(ctx):
2320 def _getsummary(ctx):
2322 return stringutil.firstline(ctx.description())
2321 return stringutil.firstline(ctx.description())
2323
2322
2324
2323
2325 def bootstrapcontinue(ui, state, opts):
2324 def bootstrapcontinue(ui, state, opts):
2326 repo = state.repo
2325 repo = state.repo
2327
2326
2328 ms = mergestatemod.mergestate.read(repo)
2327 ms = mergestatemod.mergestate.read(repo)
2329 mergeutil.checkunresolved(ms)
2328 mergeutil.checkunresolved(ms)
2330
2329
2331 if state.actions:
2330 if state.actions:
2332 actobj = state.actions.pop(0)
2331 actobj = state.actions.pop(0)
2333
2332
2334 if _isdirtywc(repo):
2333 if _isdirtywc(repo):
2335 actobj.continuedirty()
2334 actobj.continuedirty()
2336 if _isdirtywc(repo):
2335 if _isdirtywc(repo):
2337 abortdirty()
2336 abortdirty()
2338
2337
2339 parentctx, replacements = actobj.continueclean()
2338 parentctx, replacements = actobj.continueclean()
2340
2339
2341 state.parentctxnode = parentctx.node()
2340 state.parentctxnode = parentctx.node()
2342 state.replacements.extend(replacements)
2341 state.replacements.extend(replacements)
2343
2342
2344 return state
2343 return state
2345
2344
2346
2345
2347 def between(repo, old, new, keep):
2346 def between(repo, old, new, keep):
2348 """select and validate the set of revisions to edit
2347 """select and validate the set of revisions to edit
2349
2348
2350 When keep is false, the specified set can't have children."""
2349 When keep is false, the specified set can't have children."""
2351 revs = repo.revs(b'%n::%n', old, new)
2350 revs = repo.revs(b'%n::%n', old, new)
2352 if revs and not keep:
2351 if revs and not keep:
2353 rewriteutil.precheck(repo, revs, b'edit')
2352 rewriteutil.precheck(repo, revs, b'edit')
2354 if repo.revs(b'(%ld) and merge()', revs):
2353 if repo.revs(b'(%ld) and merge()', revs):
2355 raise error.StateError(
2354 raise error.StateError(
2356 _(b'cannot edit history that contains merges')
2355 _(b'cannot edit history that contains merges')
2357 )
2356 )
2358 return pycompat.maplist(repo.changelog.node, revs)
2357 return pycompat.maplist(repo.changelog.node, revs)
2359
2358
2360
2359
2361 def ruleeditor(repo, ui, actions, editcomment=b""):
2360 def ruleeditor(repo, ui, actions, editcomment=b""):
2362 """open an editor to edit rules
2361 """open an editor to edit rules
2363
2362
2364 rules are in the format [ [act, ctx], ...] like in state.rules
2363 rules are in the format [ [act, ctx], ...] like in state.rules
2365 """
2364 """
2366 if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
2365 if repo.ui.configbool(b"experimental", b"histedit.autoverb"):
2367 newact = util.sortdict()
2366 newact = util.sortdict()
2368 for act in actions:
2367 for act in actions:
2369 ctx = repo[act.node]
2368 ctx = repo[act.node]
2370 summary = _getsummary(ctx)
2369 summary = _getsummary(ctx)
2371 fword = summary.split(b' ', 1)[0].lower()
2370 fword = summary.split(b' ', 1)[0].lower()
2372 added = False
2371 added = False
2373
2372
2374 # if it doesn't end with the special character '!', just skip this
2373 # if it doesn't end with the special character '!', just skip this
2375 if fword.endswith(b'!'):
2374 if fword.endswith(b'!'):
2376 fword = fword[:-1]
2375 fword = fword[:-1]
2377 if fword in primaryactions | secondaryactions | tertiaryactions:
2376 if fword in primaryactions | secondaryactions | tertiaryactions:
2378 act.verb = fword
2377 act.verb = fword
2379 # get the target summary
2378 # get the target summary
2380 tsum = summary[len(fword) + 1 :].lstrip()
2379 tsum = summary[len(fword) + 1 :].lstrip()
2381 # safe but slow: reverse iterate over the actions so we
2380 # safe but slow: reverse iterate over the actions so we
2382 # don't clash on two commits having the same summary
2381 # don't clash on two commits having the same summary
2383 for na, l in reversed(list(newact.items())):
2382 for na, l in reversed(list(newact.items())):
2384 actx = repo[na.node]
2383 actx = repo[na.node]
2385 asum = _getsummary(actx)
2384 asum = _getsummary(actx)
2386 if asum == tsum:
2385 if asum == tsum:
2387 added = True
2386 added = True
2388 l.append(act)
2387 l.append(act)
2389 break
2388 break
2390
2389
2391 if not added:
2390 if not added:
2392 newact[act] = []
2391 newact[act] = []
2393
2392
2394 # copy over and flatten the new list
2393 # copy over and flatten the new list
2395 actions = []
2394 actions = []
2396 for na, l in newact.items():
2395 for na, l in newact.items():
2397 actions.append(na)
2396 actions.append(na)
2398 actions += l
2397 actions += l
2399
2398
2400 rules = b'\n'.join([act.torule() for act in actions])
2399 rules = b'\n'.join([act.torule() for act in actions])
2401 rules += b'\n\n'
2400 rules += b'\n\n'
2402 rules += editcomment
2401 rules += editcomment
2403 rules = ui.edit(
2402 rules = ui.edit(
2404 rules,
2403 rules,
2405 ui.username(),
2404 ui.username(),
2406 {b'prefix': b'histedit'},
2405 {b'prefix': b'histedit'},
2407 repopath=repo.path,
2406 repopath=repo.path,
2408 action=b'histedit',
2407 action=b'histedit',
2409 )
2408 )
2410
2409
2411 # Save edit rules in .hg/histedit-last-edit.txt in case
2410 # Save edit rules in .hg/histedit-last-edit.txt in case
2412 # the user needs to ask for help after something
2411 # the user needs to ask for help after something
2413 # surprising happens.
2412 # surprising happens.
2414 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2413 with repo.vfs(b'histedit-last-edit.txt', b'wb') as f:
2415 f.write(rules)
2414 f.write(rules)
2416
2415
2417 return rules
2416 return rules
2418
2417
2419
2418
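# A self-contained sketch of the autoverb matching implemented above
# (hypothetical inputs; not part of histedit itself): a summary whose
# first word is a known verb suffixed with '!' adopts that verb, and the
# remainder names the commit summary to attach the rule to.
def autoverb_sketch(summary, knownverbs):
    fword = summary.split(b' ', 1)[0].lower()
    if fword.endswith(b'!') and fword[:-1] in knownverbs:
        verb = fword[:-1]
        target = summary[len(fword) + 1 :].lstrip()
        return verb, target
    return None, summary


# autoverb_sketch(b'fold! fix typo', {b'fold', b'roll'})
# -> (b'fold', b'fix typo')
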
2420 def parserules(rules, state):
2419 def parserules(rules, state):
2421 """Read the histedit rules string and return list of action objects"""
2420 """Read the histedit rules string and return list of action objects"""
2422 rules = [
2421 rules = [
2423 l
2422 l
2424 for l in (r.strip() for r in rules.splitlines())
2423 for l in (r.strip() for r in rules.splitlines())
2425 if l and not l.startswith(b'#')
2424 if l and not l.startswith(b'#')
2426 ]
2425 ]
2427 actions = []
2426 actions = []
2428 for r in rules:
2427 for r in rules:
2429 if b' ' not in r:
2428 if b' ' not in r:
2430 raise error.ParseError(_(b'malformed line "%s"') % r)
2429 raise error.ParseError(_(b'malformed line "%s"') % r)
2431 verb, rest = r.split(b' ', 1)
2430 verb, rest = r.split(b' ', 1)
2432
2431
2433 if verb not in actiontable:
2432 if verb not in actiontable:
2434 raise error.ParseError(_(b'unknown action "%s"') % verb)
2433 raise error.ParseError(_(b'unknown action "%s"') % verb)
2435
2434
2436 action = actiontable[verb].fromrule(state, rest)
2435 action = actiontable[verb].fromrule(state, rest)
2437 actions.append(action)
2436 actions.append(action)
2438 return actions
2437 return actions
2439
2438
2440
2439
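# A plain-data sketch of the parsing below, assuming a bytes rules
# document like the one the editor produces (hashes are made up):
#
#   pick 617f94f13c0f add foo
#   fold deadbeefcafe tweak foo
#
def parserules_sketch(rules_text, known_verbs):
    actions = []
    for line in (raw.strip() for raw in rules_text.splitlines()):
        if not line or line.startswith(b'#'):
            continue  # blank lines and comments are ignored
        if b' ' not in line:
            raise ValueError('malformed line %r' % line)
        verb, rest = line.split(b' ', 1)
        if verb not in known_verbs:
            raise ValueError('unknown action %r' % verb)
        actions.append((verb, rest))
    return actions
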
2441 def warnverifyactions(ui, repo, actions, state, ctxs):
2440 def warnverifyactions(ui, repo, actions, state, ctxs):
2442 try:
2441 try:
2443 verifyactions(actions, state, ctxs)
2442 verifyactions(actions, state, ctxs)
2444 except error.ParseError:
2443 except error.ParseError:
2445 if repo.vfs.exists(b'histedit-last-edit.txt'):
2444 if repo.vfs.exists(b'histedit-last-edit.txt'):
2446 ui.warn(
2445 ui.warn(
2447 _(
2446 _(
2448 b'warning: histedit rules saved '
2447 b'warning: histedit rules saved '
2449 b'to: .hg/histedit-last-edit.txt\n'
2448 b'to: .hg/histedit-last-edit.txt\n'
2450 )
2449 )
2451 )
2450 )
2452 raise
2451 raise
2453
2452
2454
2453
2455 def verifyactions(actions, state, ctxs):
2454 def verifyactions(actions, state, ctxs):
2456 """Verify that there is exactly one action per given changeset and that
2455 """Verify that there is exactly one action per given changeset and that
2457 other constraints hold.
2456 other constraints hold.
2458
2457
2459 Will abort if there are too many or too few rules, a malformed rule,
2458 Will abort if there are too many or too few rules, a malformed rule,
2460 or a rule on a changeset outside of the user-given range.
2459 or a rule on a changeset outside of the user-given range.
2461 """
2460 """
2462 expected = {c.node() for c in ctxs}
2461 expected = {c.node() for c in ctxs}
2463 seen = set()
2462 seen = set()
2464 prev = None
2463 prev = None
2465
2464
2466 if actions and actions[0].verb in [b'roll', b'fold']:
2465 if actions and actions[0].verb in [b'roll', b'fold']:
2467 raise error.ParseError(
2466 raise error.ParseError(
2468 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2467 _(b'first changeset cannot use verb "%s"') % actions[0].verb
2469 )
2468 )
2470
2469
2471 for action in actions:
2470 for action in actions:
2472 action.verify(prev, expected, seen)
2471 action.verify(prev, expected, seen)
2473 prev = action
2472 prev = action
2474 if action.node is not None:
2473 if action.node is not None:
2475 seen.add(action.node)
2474 seen.add(action.node)
2476 missing = sorted(expected - seen) # sort to stabilize output
2475 missing = sorted(expected - seen) # sort to stabilize output
2477
2476
2478 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2477 if state.repo.ui.configbool(b'histedit', b'dropmissing'):
2479 if len(actions) == 0:
2478 if len(actions) == 0:
2480 raise error.ParseError(
2479 raise error.ParseError(
2481 _(b'no rules provided'),
2480 _(b'no rules provided'),
2482 hint=_(b'use strip extension to remove commits'),
2481 hint=_(b'use strip extension to remove commits'),
2483 )
2482 )
2484
2483
2485 drops = [drop(state, n) for n in missing]
2484 drops = [drop(state, n) for n in missing]
2486 # put them at the beginning so they execute immediately and
2485 # put them at the beginning so they execute immediately and
2487 # don't show in the edit-plan in the future
2486 # don't show in the edit-plan in the future
2488 actions[:0] = drops
2487 actions[:0] = drops
2489 elif missing:
2488 elif missing:
2490 raise error.ParseError(
2489 raise error.ParseError(
2491 _(b'missing rules for changeset %s') % short(missing[0]),
2490 _(b'missing rules for changeset %s') % short(missing[0]),
2492 hint=_(
2491 hint=_(
2493 b'use "drop %s" to discard, see also: '
2492 b'use "drop %s" to discard, see also: '
2494 b"'hg help -e histedit.config'"
2493 b"'hg help -e histedit.config'"
2495 )
2494 )
2496 % short(missing[0]),
2495 % short(missing[0]),
2497 )
2496 )
2498
2497
2499
2498
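# Example of the dropmissing path above (hypothetical node hashes): with
# histedit.dropmissing=true, a plan that omits one of the edited
# changesets, e.g.
#
#   pick 617f94f13c0f
#   pick deadbeefcafe
#
# gets a "drop <omitted-node>" action prepended and executed first,
# instead of aborting with "missing rules for changeset ...".
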
2500 def adjustreplacementsfrommarkers(repo, oldreplacements):
2499 def adjustreplacementsfrommarkers(repo, oldreplacements):
2501 """Adjust replacements from obsolescence markers
2500 """Adjust replacements from obsolescence markers
2502
2501
2503 Replacements structure is originally generated based on
2502 Replacements structure is originally generated based on
2504 histedit's state and does not account for changes that are
2503 histedit's state and does not account for changes that are
2505 not recorded there. This function fixes that by adding
2504 not recorded there. This function fixes that by adding
2506 data read from obsolescence markers."""
2505 data read from obsolescence markers."""
2507 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2506 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
2508 return oldreplacements
2507 return oldreplacements
2509
2508
2510 unfi = repo.unfiltered()
2509 unfi = repo.unfiltered()
2511 get_rev = unfi.changelog.index.get_rev
2510 get_rev = unfi.changelog.index.get_rev
2512 obsstore = repo.obsstore
2511 obsstore = repo.obsstore
2513 newreplacements = list(oldreplacements)
2512 newreplacements = list(oldreplacements)
2514 oldsuccs = [r[1] for r in oldreplacements]
2513 oldsuccs = [r[1] for r in oldreplacements]
2515 # successors that have already been added to succstocheck once
2514 # successors that have already been added to succstocheck once
2516 seensuccs = set().union(
2515 seensuccs = set().union(
2517 *oldsuccs
2516 *oldsuccs
2518 ) # create a set from an iterable of tuples
2517 ) # create a set from an iterable of tuples
2519 succstocheck = list(seensuccs)
2518 succstocheck = list(seensuccs)
2520 while succstocheck:
2519 while succstocheck:
2521 n = succstocheck.pop()
2520 n = succstocheck.pop()
2522 missing = get_rev(n) is None
2521 missing = get_rev(n) is None
2523 markers = obsstore.successors.get(n, ())
2522 markers = obsstore.successors.get(n, ())
2524 if missing and not markers:
2523 if missing and not markers:
2525 # dead end, mark it as such
2524 # dead end, mark it as such
2526 newreplacements.append((n, ()))
2525 newreplacements.append((n, ()))
2527 for marker in markers:
2526 for marker in markers:
2528 nsuccs = marker[1]
2527 nsuccs = marker[1]
2529 newreplacements.append((n, nsuccs))
2528 newreplacements.append((n, nsuccs))
2530 for nsucc in nsuccs:
2529 for nsucc in nsuccs:
2531 if nsucc not in seensuccs:
2530 if nsucc not in seensuccs:
2532 seensuccs.add(nsucc)
2531 seensuccs.add(nsucc)
2533 succstocheck.append(nsucc)
2532 succstocheck.append(nsucc)
2534
2533
2535 return newreplacements
2534 return newreplacements
2536
2535
2537
2536
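# A toy rendering of the traversal below, assuming `markers` simply maps
# a node to an iterable of successor tuples (the real obsstore keeps
# richer marker records, of which only marker[1] is consumed here):
def walk_successors_sketch(initial, markers):
    seen = set(initial)
    stack = list(initial)
    replacements = []
    while stack:
        n = stack.pop()
        for nsuccs in markers.get(n, ()):
            replacements.append((n, nsuccs))
            for s in nsuccs:
                if s not in seen:
                    seen.add(s)
                    stack.append(s)
    return replacements
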
2538 def processreplacement(state):
2537 def processreplacement(state):
2539 """process the list of replacements to return
2538 """process the list of replacements to return
2540
2539
2541 1) the final mapping between original and created nodes
2540 1) the final mapping between original and created nodes
2542 2) the list of temporary nodes created by histedit
2541 2) the list of temporary nodes created by histedit
2543 3) the list of new commits created by histedit"""
2542 3) the list of new commits created by histedit"""
2544 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2543 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
2545 allsuccs = set()
2544 allsuccs = set()
2546 replaced = set()
2545 replaced = set()
2547 fullmapping = {}
2546 fullmapping = {}
2548 # initialize basic set
2547 # initialize basic set
2549 # fullmapping records all operations recorded in replacement
2548 # fullmapping records all operations recorded in replacement
2550 for rep in replacements:
2549 for rep in replacements:
2551 allsuccs.update(rep[1])
2550 allsuccs.update(rep[1])
2552 replaced.add(rep[0])
2551 replaced.add(rep[0])
2553 fullmapping.setdefault(rep[0], set()).update(rep[1])
2552 fullmapping.setdefault(rep[0], set()).update(rep[1])
2554 new = allsuccs - replaced
2553 new = allsuccs - replaced
2555 tmpnodes = allsuccs & replaced
2554 tmpnodes = allsuccs & replaced
2556 # Reduce fullmapping to a direct relation between the original nodes
2555 # Reduce fullmapping to a direct relation between the original nodes
2557 # and the final nodes created during history editing.
2556 # and the final nodes created during history editing.
2558 # Dropped changesets are replaced by an empty list.
2557 # Dropped changesets are replaced by an empty list.
2559 toproceed = set(fullmapping)
2558 toproceed = set(fullmapping)
2560 final = {}
2559 final = {}
2561 while toproceed:
2560 while toproceed:
2562 for x in list(toproceed):
2561 for x in list(toproceed):
2563 succs = fullmapping[x]
2562 succs = fullmapping[x]
2564 for s in list(succs):
2563 for s in list(succs):
2565 if s in toproceed:
2564 if s in toproceed:
2566 # non final node with unknown closure
2565 # non final node with unknown closure
2567 # We can't process this now
2566 # We can't process this now
2568 break
2567 break
2569 elif s in final:
2568 elif s in final:
2570 # non final node, replace with closure
2569 # non final node, replace with closure
2571 succs.remove(s)
2570 succs.remove(s)
2572 succs.update(final[s])
2571 succs.update(final[s])
2573 else:
2572 else:
2574 final[x] = succs
2573 final[x] = succs
2575 toproceed.remove(x)
2574 toproceed.remove(x)
2576 # remove tmpnodes from final mapping
2575 # remove tmpnodes from final mapping
2577 for n in tmpnodes:
2576 for n in tmpnodes:
2578 del final[n]
2577 del final[n]
2579 # we expect all changes involved in final to exist in the repo
2578 # we expect all changes involved in final to exist in the repo
2580 # turn `final` into list (topologically sorted)
2579 # turn `final` into list (topologically sorted)
2581 get_rev = state.repo.changelog.index.get_rev
2580 get_rev = state.repo.changelog.index.get_rev
2582 for prec, succs in final.items():
2581 for prec, succs in final.items():
2583 final[prec] = sorted(succs, key=get_rev)
2582 final[prec] = sorted(succs, key=get_rev)
2584
2583
2585 # computed topmost element (necessary for bookmark)
2584 # computed topmost element (necessary for bookmark)
2586 if new:
2585 if new:
2587 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2586 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
2588 elif not final:
2587 elif not final:
2589 # Nothing was rewritten at all; we won't need `newtopmost`.
2588 # Nothing was rewritten at all; we won't need `newtopmost`.
2590 # It is the same as `oldtopmost`, and `processreplacement` knows it.
2589 # It is the same as `oldtopmost`, and `processreplacement` knows it.
2591 newtopmost = None
2590 newtopmost = None
2592 else:
2591 else:
2593 # everybody died. The newtopmost is the parent of the root.
2592 # everybody died. The newtopmost is the parent of the root.
2594 r = state.repo.changelog.rev
2593 r = state.repo.changelog.rev
2595 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2594 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
2596
2595
2597 return final, tmpnodes, new, newtopmost
2596 return final, tmpnodes, new, newtopmost
2598
2597
2599
2598
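# A worked example of the reduction above on toy "nodes": A was rewritten
# to B, then B to C, so B is temporary and the chain collapses.
#
#   replacements = [(b'A', (b'B',)), (b'B', (b'C',))]
#   allsuccs = {b'B', b'C'}; replaced = {b'A', b'B'}
#   new      = {b'C'}          # successors that were never replaced
#   tmpnodes = {b'B'}          # successors that were themselves replaced
#   final    = {b'A': [b'C']}  # after deleting tmpnodes and sorting
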
2600 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2599 def movetopmostbookmarks(repo, oldtopmost, newtopmost):
2601 """Move bookmark from oldtopmost to newly created topmost
2600 """Move bookmark from oldtopmost to newly created topmost
2602
2601
2603 This is arguably a feature and we may only want that for the active
2602 This is arguably a feature and we may only want that for the active
2604 bookmark. But the behavior is kept compatible with the old version for now.
2603 bookmark. But the behavior is kept compatible with the old version for now.
2605 """
2604 """
2606 if not oldtopmost or not newtopmost:
2605 if not oldtopmost or not newtopmost:
2607 return
2606 return
2608 oldbmarks = repo.nodebookmarks(oldtopmost)
2607 oldbmarks = repo.nodebookmarks(oldtopmost)
2609 if oldbmarks:
2608 if oldbmarks:
2610 with repo.lock(), repo.transaction(b'histedit') as tr:
2609 with repo.lock(), repo.transaction(b'histedit') as tr:
2611 marks = repo._bookmarks
2610 marks = repo._bookmarks
2612 changes = []
2611 changes = []
2613 for name in oldbmarks:
2612 for name in oldbmarks:
2614 changes.append((name, newtopmost))
2613 changes.append((name, newtopmost))
2615 marks.applychanges(repo, tr, changes)
2614 marks.applychanges(repo, tr, changes)
2616
2615
2617
2616
2618 def cleanupnode(ui, repo, nodes, nobackup=False):
2617 def cleanupnode(ui, repo, nodes, nobackup=False):
2619 """strip a group of nodes from the repository
2618 """strip a group of nodes from the repository
2620
2619
2621 The set of nodes to strip may contain unknown nodes."""
2620 The set of nodes to strip may contain unknown nodes."""
2622 with repo.lock():
2621 with repo.lock():
2623 # do not let filtering get in the way of the cleanse
2622 # do not let filtering get in the way of the cleanse
2624 # we should probably get rid of obsolescence markers created during the
2623 # we should probably get rid of obsolescence markers created during the
2625 # histedit, but we currently do not have such information.
2624 # histedit, but we currently do not have such information.
2626 repo = repo.unfiltered()
2625 repo = repo.unfiltered()
2627 # Find all nodes that need to be stripped
2626 # Find all nodes that need to be stripped
2628 # (we use %lr instead of %ln to silently ignore unknown items)
2627 # (we use %lr instead of %ln to silently ignore unknown items)
2629 has_node = repo.changelog.index.has_node
2628 has_node = repo.changelog.index.has_node
2630 nodes = sorted(n for n in nodes if has_node(n))
2629 nodes = sorted(n for n in nodes if has_node(n))
2631 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2630 roots = [c.node() for c in repo.set(b"roots(%ln)", nodes)]
2632 if roots:
2631 if roots:
2633 backup = not nobackup
2632 backup = not nobackup
2634 repair.strip(ui, repo, roots, backup=backup)
2633 repair.strip(ui, repo, roots, backup=backup)
2635
2634
2636
2635
2637 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2636 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
2638 if isinstance(nodelist, bytes):
2637 if isinstance(nodelist, bytes):
2639 nodelist = [nodelist]
2638 nodelist = [nodelist]
2640 state = histeditstate(repo)
2639 state = histeditstate(repo)
2641 if state.inprogress():
2640 if state.inprogress():
2642 state.read()
2641 state.read()
2643 histedit_nodes = {
2642 histedit_nodes = {
2644 action.node for action in state.actions if action.node
2643 action.node for action in state.actions if action.node
2645 }
2644 }
2646 common_nodes = histedit_nodes & set(nodelist)
2645 common_nodes = histedit_nodes & set(nodelist)
2647 if common_nodes:
2646 if common_nodes:
2648 raise error.Abort(
2647 raise error.Abort(
2649 _(b"histedit in progress, can't strip %s")
2648 _(b"histedit in progress, can't strip %s")
2650 % b', '.join(short(x) for x in common_nodes)
2649 % b', '.join(short(x) for x in common_nodes)
2651 )
2650 )
2652 return orig(ui, repo, nodelist, *args, **kwargs)
2651 return orig(ui, repo, nodelist, *args, **kwargs)
2653
2652
2654
2653
2655 extensions.wrapfunction(repair, 'strip', stripwrapper)
2654 extensions.wrapfunction(repair, 'strip', stripwrapper)
2656
2655
2657
2656
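# Conceptually, the wrapfunction() call above amounts to (simplified
# sketch, not the real extensions implementation):
#
#   origstrip = repair.strip
#
#   def wrapped(*args, **kwargs):
#       return stripwrapper(origstrip, *args, **kwargs)
#
#   repair.strip = wrapped
#
# which lets stripwrapper() veto stripping nodes that an in-progress
# histedit still needs, and otherwise delegate to the original.
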
2658 def summaryhook(ui, repo):
2657 def summaryhook(ui, repo):
2659 state = histeditstate(repo)
2658 state = histeditstate(repo)
2660 if not state.inprogress():
2659 if not state.inprogress():
2661 return
2660 return
2662 state.read()
2661 state.read()
2663 if state.actions:
2662 if state.actions:
2664 # i18n: column positioning for "hg summary"
2663 # i18n: column positioning for "hg summary"
2665 ui.write(
2664 ui.write(
2666 _(b'hist: %s (histedit --continue)\n')
2665 _(b'hist: %s (histedit --continue)\n')
2667 % (
2666 % (
2668 ui.label(_(b'%d remaining'), b'histedit.remaining')
2667 ui.label(_(b'%d remaining'), b'histedit.remaining')
2669 % len(state.actions)
2668 % len(state.actions)
2670 )
2669 )
2671 )
2670 )
2672
2671
2673
2672
2674 def extsetup(ui):
2673 def extsetup(ui):
2675 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2674 cmdutil.summaryhooks.add(b'histedit', summaryhook)
2676 statemod.addunfinished(
2675 statemod.addunfinished(
2677 b'histedit',
2676 b'histedit',
2678 fname=b'histedit-state',
2677 fname=b'histedit-state',
2679 allowcommit=True,
2678 allowcommit=True,
2680 continueflag=True,
2679 continueflag=True,
2681 abortfunc=hgaborthistedit,
2680 abortfunc=hgaborthistedit,
2682 )
2681 )
@@ -1,894 +1,893 b''
1 # keyword.py - $Keyword$ expansion for Mercurial
1 # keyword.py - $Keyword$ expansion for Mercurial
2 #
2 #
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 #
7 #
8 # $Id$
8 # $Id$
9 #
9 #
10 # Keyword expansion hack against the grain of a Distributed SCM
10 # Keyword expansion hack against the grain of a Distributed SCM
11 #
11 #
12 # There are many good reasons why this is not needed in a distributed
12 # There are many good reasons why this is not needed in a distributed
13 # SCM, still it may be useful in very small projects based on single
13 # SCM, still it may be useful in very small projects based on single
14 # files (like LaTeX packages), that are mostly addressed to an
14 # files (like LaTeX packages), that are mostly addressed to an
15 # audience not running a version control system.
15 # audience not running a version control system.
16 #
16 #
17 # For in-depth discussion refer to
17 # For in-depth discussion refer to
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 #
19 #
20 # Keyword expansion is based on Mercurial's changeset template mappings.
20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 #
21 #
22 # Binary files are not touched.
22 # Binary files are not touched.
23 #
23 #
24 # Files to act upon/ignore are specified in the [keyword] section.
24 # Files to act upon/ignore are specified in the [keyword] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
25 # Customized keyword template mappings in the [keywordmaps] section.
26 #
26 #
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
27 # Run 'hg help keyword' and 'hg kwdemo' to get info on configuration.
28
28
29 '''expand keywords in tracked files
29 '''expand keywords in tracked files
30
30
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 tracked text files selected by your configuration.
32 tracked text files selected by your configuration.
33
33
34 Keywords are only expanded in local repositories and not stored in the
34 Keywords are only expanded in local repositories and not stored in the
35 change history. The mechanism can be regarded as a convenience for the
35 change history. The mechanism can be regarded as a convenience for the
36 current user or for archive distribution.
36 current user or for archive distribution.
37
37
38 Keywords expand to the changeset data pertaining to the latest change
38 Keywords expand to the changeset data pertaining to the latest change
39 relative to the working directory parent of each file.
39 relative to the working directory parent of each file.
40
40
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 sections of hgrc files.
42 sections of hgrc files.
43
43
44 Example::
44 Example::
45
45
46 [keyword]
46 [keyword]
47 # expand keywords in every python file except those matching "x*"
47 # expand keywords in every python file except those matching "x*"
48 **.py =
48 **.py =
49 x* = ignore
49 x* = ignore
50
50
51 [keywordset]
51 [keywordset]
52 # prefer svn- over cvs-like default keywordmaps
52 # prefer svn- over cvs-like default keywordmaps
53 svn = True
53 svn = True
54
54
55 .. note::
55 .. note::
56
56
57 The more specific your filename patterns are, the less speed you
57 The more specific your filename patterns are, the less speed you
58 lose in huge repositories.
58 lose in huge repositories.
59
59
60 For [keywordmaps] template mapping and expansion demonstration and
60 For [keywordmaps] template mapping and expansion demonstration and
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 available templates and filters.
62 available templates and filters.
63
63
64 Three additional date template filters are provided:
64 Three additional date template filters are provided:
65
65
66 :``utcdate``: "2006/09/18 15:13:13"
66 :``utcdate``: "2006/09/18 15:13:13"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
68 :``svnisodate``: "2006-09-18 08:13:13 -0700 (Mon, 18 Sep 2006)"
69
69
70 The default template mappings (view with :hg:`kwdemo -d`) can be
70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 replaced with customized keywords and templates. Again, run
71 replaced with customized keywords and templates. Again, run
72 :hg:`kwdemo` to control the results of your configuration changes.
72 :hg:`kwdemo` to control the results of your configuration changes.
73
73
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 to avoid storing expanded keywords in the change history.
75 to avoid storing expanded keywords in the change history.
76
76
77 To force expansion after enabling it, or a configuration change, run
77 To force expansion after enabling it, or a configuration change, run
78 :hg:`kwexpand`.
78 :hg:`kwexpand`.
79
79
80 Expansions spanning more than one line and incremental expansions,
80 Expansions spanning more than one line and incremental expansions,
81 like CVS' $Log$, are not supported. A keyword template map "Log =
81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 {desc}" expands to the first line of the changeset description.
82 {desc}" expands to the first line of the changeset description.
83 '''
83 '''
84
84
85
85
86 import os
86 import os
87 import re
87 import re
88 import weakref
88 import weakref
89
89
90 from mercurial.i18n import _
90 from mercurial.i18n import _
91 from mercurial.pycompat import getattr
92 from mercurial.hgweb import webcommands
91 from mercurial.hgweb import webcommands
93
92
94 from mercurial import (
93 from mercurial import (
95 cmdutil,
94 cmdutil,
96 context,
95 context,
97 dispatch,
96 dispatch,
98 error,
97 error,
99 extensions,
98 extensions,
100 filelog,
99 filelog,
101 localrepo,
100 localrepo,
102 logcmdutil,
101 logcmdutil,
103 match,
102 match,
104 patch,
103 patch,
105 pathutil,
104 pathutil,
106 pycompat,
105 pycompat,
107 registrar,
106 registrar,
108 scmutil,
107 scmutil,
109 templatefilters,
108 templatefilters,
110 templateutil,
109 templateutil,
111 util,
110 util,
112 )
111 )
113 from mercurial.utils import (
112 from mercurial.utils import (
114 dateutil,
113 dateutil,
115 stringutil,
114 stringutil,
116 )
115 )
117 from mercurial.dirstateutils import timestamp
116 from mercurial.dirstateutils import timestamp
118
117
119 cmdtable = {}
118 cmdtable = {}
120 command = registrar.command(cmdtable)
119 command = registrar.command(cmdtable)
121 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
120 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
122 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
121 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
123 # be specifying the version(s) of Mercurial they are tested with, or
122 # be specifying the version(s) of Mercurial they are tested with, or
124 # leave the attribute unspecified.
123 # leave the attribute unspecified.
125 testedwith = b'ships-with-hg-core'
124 testedwith = b'ships-with-hg-core'
126
125
127 # hg commands that do not act on keywords
126 # hg commands that do not act on keywords
128 nokwcommands = (
127 nokwcommands = (
129 b'add addremove annotate bundle export grep incoming init log'
128 b'add addremove annotate bundle export grep incoming init log'
130 b' outgoing push tip verify convert email glog'
129 b' outgoing push tip verify convert email glog'
131 )
130 )
132
131
133 # webcommands that do not act on keywords
132 # webcommands that do not act on keywords
134 nokwwebcommands = 'annotate changeset rev filediff diff comparison'
133 nokwwebcommands = 'annotate changeset rev filediff diff comparison'
135
134
136 # hg commands that trigger expansion only when writing to working dir,
135 # hg commands that trigger expansion only when writing to working dir,
137 # not when reading filelog, and unexpand when reading from working dir
136 # not when reading filelog, and unexpand when reading from working dir
138 restricted = (
137 restricted = (
139 b'merge kwexpand kwshrink record qrecord resolve transplant'
138 b'merge kwexpand kwshrink record qrecord resolve transplant'
140 b' unshelve rebase graft backout histedit fetch'
139 b' unshelve rebase graft backout histedit fetch'
141 )
140 )
142
141
143 # names of extensions using dorecord
142 # names of extensions using dorecord
144 recordextensions = b'record'
143 recordextensions = b'record'
145
144
146 colortable = {
145 colortable = {
147 b'kwfiles.enabled': b'green bold',
146 b'kwfiles.enabled': b'green bold',
148 b'kwfiles.deleted': b'cyan bold underline',
147 b'kwfiles.deleted': b'cyan bold underline',
149 b'kwfiles.enabledunknown': b'green',
148 b'kwfiles.enabledunknown': b'green',
150 b'kwfiles.ignored': b'bold',
149 b'kwfiles.ignored': b'bold',
151 b'kwfiles.ignoredunknown': b'none',
150 b'kwfiles.ignoredunknown': b'none',
152 }
151 }
153
152
154 templatefilter = registrar.templatefilter()
153 templatefilter = registrar.templatefilter()
155
154
156 configtable = {}
155 configtable = {}
157 configitem = registrar.configitem(configtable)
156 configitem = registrar.configitem(configtable)
158
157
159 configitem(
158 configitem(
160 b'keywordset',
159 b'keywordset',
161 b'svn',
160 b'svn',
162 default=False,
161 default=False,
163 )
162 )
164 # date like in cvs' $Date
163 # date like in cvs' $Date
165 @templatefilter(b'utcdate', intype=templateutil.date)
164 @templatefilter(b'utcdate', intype=templateutil.date)
166 def utcdate(date):
165 def utcdate(date):
167 """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
166 """Date. Returns a UTC-date in this format: "2009/08/18 11:00:13"."""
168 dateformat = b'%Y/%m/%d %H:%M:%S'
167 dateformat = b'%Y/%m/%d %H:%M:%S'
169 return dateutil.datestr((date[0], 0), dateformat)
168 return dateutil.datestr((date[0], 0), dateformat)
170
169
171
170
172 # date like in svn's $Date
171 # date like in svn's $Date
173 @templatefilter(b'svnisodate', intype=templateutil.date)
172 @templatefilter(b'svnisodate', intype=templateutil.date)
174 def svnisodate(date):
173 def svnisodate(date):
175 """Date. Returns a date in this format: "2009-08-18 13:00:13
174 """Date. Returns a date in this format: "2009-08-18 13:00:13
176 +0200 (Tue, 18 Aug 2009)".
175 +0200 (Tue, 18 Aug 2009)".
177 """
176 """
178 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
177 return dateutil.datestr(date, b'%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
179
178
180
179
181 # date like in svn's $Id
180 # date like in svn's $Id
182 @templatefilter(b'svnutcdate', intype=templateutil.date)
181 @templatefilter(b'svnutcdate', intype=templateutil.date)
183 def svnutcdate(date):
182 def svnutcdate(date):
184 """Date. Returns a UTC-date in this format: "2009-08-18
183 """Date. Returns a UTC-date in this format: "2009-08-18
185 11:00:13Z".
184 11:00:13Z".
186 """
185 """
187 dateformat = b'%Y-%m-%d %H:%M:%SZ'
186 dateformat = b'%Y-%m-%d %H:%M:%SZ'
188 return dateutil.datestr((date[0], 0), dateformat)
187 return dateutil.datestr((date[0], 0), dateformat)
189
188
190
189
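# For illustration, with an assumed timestamp (2006-09-18 15:13:13 UTC),
# something like
#
#   dateutil.datestr((1158592393, 0), b'%Y-%m-%d %H:%M:%SZ')
#
# yields "2006-09-18 15:13:13Z". Zeroing the timezone field, as utcdate
# and svnutcdate do above, renders the stored timestamp in UTC regardless
# of the committer's local zone.
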
191 # make keyword tools accessible
190 # make keyword tools accessible
192 kwtools = {b'hgcmd': b''}
191 kwtools = {b'hgcmd': b''}
193
192
194
193
195 def _defaultkwmaps(ui):
194 def _defaultkwmaps(ui):
196 '''Returns default keywordmaps according to keywordset configuration.'''
195 '''Returns default keywordmaps according to keywordset configuration.'''
197 templates = {
196 templates = {
198 b'Revision': b'{node|short}',
197 b'Revision': b'{node|short}',
199 b'Author': b'{author|user}',
198 b'Author': b'{author|user}',
200 }
199 }
201 kwsets = (
200 kwsets = (
202 {
201 {
203 b'Date': b'{date|utcdate}',
202 b'Date': b'{date|utcdate}',
204 b'RCSfile': b'{file|basename},v',
203 b'RCSfile': b'{file|basename},v',
205 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
204 b'RCSFile': b'{file|basename},v', # kept for backwards compatibility
206 # with hg-keyword
205 # with hg-keyword
207 b'Source': b'{root}/{file},v',
206 b'Source': b'{root}/{file},v',
208 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
207 b'Id': b'{file|basename},v {node|short} {date|utcdate} {author|user}',
209 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
208 b'Header': b'{root}/{file},v {node|short} {date|utcdate} {author|user}',
210 },
209 },
211 {
210 {
212 b'Date': b'{date|svnisodate}',
211 b'Date': b'{date|svnisodate}',
213 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
212 b'Id': b'{file|basename},v {node|short} {date|svnutcdate} {author|user}',
214 b'LastChangedRevision': b'{node|short}',
213 b'LastChangedRevision': b'{node|short}',
215 b'LastChangedBy': b'{author|user}',
214 b'LastChangedBy': b'{author|user}',
216 b'LastChangedDate': b'{date|svnisodate}',
215 b'LastChangedDate': b'{date|svnisodate}',
217 },
216 },
218 )
217 )
219 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
218 templates.update(kwsets[ui.configbool(b'keywordset', b'svn')])
220 return templates
219 return templates
221
220
222
221
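# With the cvs-like defaults above, a tracked file containing "$Id$"
# would expand, for a hypothetical changeset, to something like:
#
#   $Id: foo.py,v 1234567890ab 2006/09/18 15:13:13 alice $
#
# and :hg:`kwshrink` reduces it back to the bare "$Id$".
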
223 def _shrinktext(text, subfunc):
222 def _shrinktext(text, subfunc):
224 """Helper for keyword expansion removal in text.
223 """Helper for keyword expansion removal in text.
225 Depending on subfunc, also returns the number of substitutions."""
224 Depending on subfunc, also returns the number of substitutions."""
226 return subfunc(br'$\1$', text)
225 return subfunc(br'$\1$', text)
227
226
228
227
229 def _preselect(wstatus, changed):
228 def _preselect(wstatus, changed):
230 """Retrieves modified and added files from a working directory state
229 """Retrieves modified and added files from a working directory state
231 and returns the subset of each that is contained in the given changed
230 and returns the subset of each that is contained in the given changed
232 files retrieved from a change context."""
231 files retrieved from a change context."""
233 modified = [f for f in wstatus.modified if f in changed]
232 modified = [f for f in wstatus.modified if f in changed]
234 added = [f for f in wstatus.added if f in changed]
233 added = [f for f in wstatus.added if f in changed]
235 return modified, added
234 return modified, added
236
235
237
236
238 class kwtemplater:
237 class kwtemplater:
239 """
238 """
240 Sets up keyword templates, corresponding keyword regex, and
239 Sets up keyword templates, corresponding keyword regex, and
241 provides keyword substitution functions.
240 provides keyword substitution functions.
242 """
241 """
243
242
244 def __init__(self, ui, repo, inc, exc):
243 def __init__(self, ui, repo, inc, exc):
245 self.ui = ui
244 self.ui = ui
246 self._repo = weakref.ref(repo)
245 self._repo = weakref.ref(repo)
247 self.match = match.match(repo.root, b'', [], inc, exc)
246 self.match = match.match(repo.root, b'', [], inc, exc)
248 self.restrict = kwtools[b'hgcmd'] in restricted.split()
247 self.restrict = kwtools[b'hgcmd'] in restricted.split()
249 self.postcommit = False
248 self.postcommit = False
250
249
251 kwmaps = self.ui.configitems(b'keywordmaps')
250 kwmaps = self.ui.configitems(b'keywordmaps')
252 if kwmaps: # override default templates
251 if kwmaps: # override default templates
253 self.templates = dict(kwmaps)
252 self.templates = dict(kwmaps)
254 else:
253 else:
255 self.templates = _defaultkwmaps(self.ui)
254 self.templates = _defaultkwmaps(self.ui)
256
255
257 @property
256 @property
258 def repo(self):
257 def repo(self):
259 return self._repo()
258 return self._repo()
260
259
261 @util.propertycache
260 @util.propertycache
262 def escape(self):
261 def escape(self):
263 '''Returns bar-separated and escaped keywords.'''
262 '''Returns bar-separated and escaped keywords.'''
264 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
263 return b'|'.join(map(stringutil.reescape, self.templates.keys()))
265
264
266 @util.propertycache
265 @util.propertycache
267 def rekw(self):
266 def rekw(self):
268 '''Returns regex for unexpanded keywords.'''
267 '''Returns regex for unexpanded keywords.'''
269 return re.compile(br'\$(%s)\$' % self.escape)
268 return re.compile(br'\$(%s)\$' % self.escape)
270
269
271 @util.propertycache
270 @util.propertycache
272 def rekwexp(self):
271 def rekwexp(self):
273 '''Returns regex for expanded keywords.'''
272 '''Returns regex for expanded keywords.'''
274 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
273 return re.compile(br'\$(%s): [^$\n\r]*? \$' % self.escape)
275
274
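# For a single keyword template {b'Id': ...}, the two patterns reduce,
# in effect, to:
#
#   rekw    = re.compile(br'\$(Id)\$')               # "$Id$", unexpanded
#   rekwexp = re.compile(br'\$(Id): [^$\n\r]*? \$')  # "$Id: ... $", expanded
#
# so expand() rewrites the bare form, while shrinking matches the
# already-expanded single-line "$Keyword: value $" form.
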
276 def substitute(self, data, path, ctx, subfunc):
275 def substitute(self, data, path, ctx, subfunc):
277 '''Replaces keywords in data with expanded template.'''
276 '''Replaces keywords in data with expanded template.'''
278
277
279 def kwsub(mobj):
278 def kwsub(mobj):
280 kw = mobj.group(1)
279 kw = mobj.group(1)
281 ct = logcmdutil.maketemplater(
280 ct = logcmdutil.maketemplater(
282 self.ui, self.repo, self.templates[kw]
281 self.ui, self.repo, self.templates[kw]
283 )
282 )
284 self.ui.pushbuffer()
283 self.ui.pushbuffer()
285 ct.show(ctx, root=self.repo.root, file=path)
284 ct.show(ctx, root=self.repo.root, file=path)
286 ekw = templatefilters.firstline(self.ui.popbuffer())
285 ekw = templatefilters.firstline(self.ui.popbuffer())
287 return b'$%s: %s $' % (kw, ekw)
286 return b'$%s: %s $' % (kw, ekw)
288
287
289 return subfunc(kwsub, data)
288 return subfunc(kwsub, data)
290
289
291 def linkctx(self, path, fileid):
290 def linkctx(self, path, fileid):
292 '''Similar to filelog.linkrev, but returns a changectx.'''
291 '''Similar to filelog.linkrev, but returns a changectx.'''
293 return self.repo.filectx(path, fileid=fileid).changectx()
292 return self.repo.filectx(path, fileid=fileid).changectx()
294
293
295 def expand(self, path, node, data):
294 def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        if (
            not self.restrict
            and self.match(path)
            and not stringutil.binary(data)
        ):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        """Returns subset of candidates which are configured for keyword
        expansion but are not symbolic links."""
        return [f for f in cand if self.match(f) and b'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.postcommit:  # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup  # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        if self.restrict or rekw:
            re_kw = self.rekw
        else:
            re_kw = self.rekwexp
        if expand:
            msg = _(b'overwriting %s expanding keywords\n')
        else:
            msg = _(b'overwriting %s shrinking keywords\n')
        wctx = self.repo[None]
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if stringutil.binary(data):
                continue
            if expand:
                parents = ctx.parents()
                if lookup:
                    ctx = self.linkctx(f, mf[f])
                elif self.restrict and len(parents) > 1:
                    # merge commit
                    # in case of conflict f is in modified state during
                    # merge, even if f does not differ from f in parent
                    for p in parents:
                        if f in p and not p[f].cmp(ctx[f]):
                            ctx = p[f].changectx()
                            break
                data, found = self.substitute(data, f, ctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                fp = self.repo.wvfs(f, b"wb", atomictemp=True)
                fp.write(data)
                fp.close()
                if kwcmd:
                    s = wctx[f].lstat()
                    mode = s.st_mode
                    size = s.st_size
                    mtime = timestamp.mtime_of(s)
                    cache_data = (mode, size, mtime)
                    self.repo.dirstate.set_clean(f, cache_data)
                elif self.postcommit:
                    self.repo.dirstate.update_file_p1(f, p1_tracked=True)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not stringutil.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = b''.join(lines)
            if not stringutil.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        """If in restricted mode returns data read from wdir with
        keyword substitutions removed."""
        if self.restrict:
            return self.shrink(fname, data)
        return data

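# A minimal, self-contained sketch (not part of the extension) of the
# $Keyword$ round trip that kwtemplater's expand()/shrink() implement above.
# The regexes and the template value below are simplified, illustrative
# assumptions, not the extension's real keyword maps.
def _kw_roundtrip_sketch():
    import re

    templates = {b'Id': b'demo.txt,v 1.1 2024/01/01 alice'}  # made-up value
    rekw = re.compile(br'\$(Id)\$')  # unexpanded keyword, as stored
    rekwexp = re.compile(br'\$(Id): [^$\n]*? \$')  # expanded keyword, in wdir

    def expand(data):
        return rekw.sub(
            lambda m: b'$%s: %s $' % (m.group(1), templates[m.group(1)]), data
        )

    def shrink(data):
        return rekwexp.sub(br'$\1$', data)

    assert expand(b'$Id$') == b'$Id: demo.txt,v 1.1 2024/01/01 alice $'
    assert shrink(expand(b'$Id$')) == b'$Id$'  # the stored form round-trips
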
class kwfilelog(filelog.filelog):
    """
    Subclass of filelog to hook into its read, add, cmp methods.
    Keywords are "stored" unexpanded, and processed on reading.
    """

    def __init__(self, opener, kwt, path):
        super(kwfilelog, self).__init__(opener, path)
        self.kwt = kwt
        self.path = path

    def read(self, node):
        '''Expands keywords when reading filelog.'''
        data = super(kwfilelog, self).read(node)
        if self.renamed(node):
            return data
        return self.kwt.expand(self.path, node, data)

    def add(self, text, meta, tr, link, p1=None, p2=None):
        '''Removes keyword substitutions when adding to filelog.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)

    def cmp(self, node, text):
        '''Removes keyword substitutions for comparison.'''
        text = self.kwt.shrink(self.path, text)
        return super(kwfilelog, self).cmp(node, text)


def _status(ui, repo, wctx, kwt, *pats, **opts):
    """Bails out if [keyword] configuration is not active.
    Returns status of working directory."""
    if kwt:
        return repo.status(
            match=scmutil.match(wctx, pats, pycompat.byteskwargs(opts)),
            clean=True,
            unknown=opts.get('unknown') or opts.get('all'),
        )
    if ui.configitems(b'keyword'):
        raise error.Abort(_(b'[keyword] patterns cannot match'))
    raise error.Abort(_(b'no [keyword] patterns configured'))


def _kwfwrite(ui, repo, expand, *pats, **opts):
    '''Selects files and passes them to kwtemplater.overwrite.'''
    wctx = repo[None]
    if len(wctx.parents()) > 1:
        raise error.Abort(_(b'outstanding uncommitted merge'))
    kwt = getattr(repo, '_keywordkwt', None)
    with repo.wlock(), repo.dirstate.changing_files(repo):
        status = _status(ui, repo, wctx, kwt, *pats, **opts)
        if status.modified or status.added or status.removed or status.deleted:
            raise error.Abort(_(b'outstanding uncommitted changes'))
        kwt.overwrite(wctx, status.clean, True, expand)

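# A tiny sketch (not part of the extension) of the attribute probe used by
# _kwfwrite() above: with the pycompat proxies dropped, this is the plain
# builtin getattr with a native-str name and a None default.  FakeRepo is a
# hypothetical stand-in for a repository object.
def _kwt_lookup_sketch():
    class FakeRepo:
        pass

    repo = FakeRepo()
    assert getattr(repo, '_keywordkwt', None) is None  # extension inactive
    repo._keywordkwt = object()  # roughly what reposetup() installs
    assert getattr(repo, '_keywordkwt', None) is not None
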
@command(
    b'kwdemo',
    [
        (b'd', b'default', None, _(b'show default keyword template maps')),
        (b'f', b'rcfile', b'', _(b'read maps from rcfile'), _(b'FILE')),
    ],
    _(b'hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
    optionalrepo=True,
)
def demo(ui, repo, *args, **opts):
    """print [keywordmaps] configuration and an expansion example

    Show current, custom, or default keyword template maps and their
    expansions.

    Extend the current configuration by specifying maps as arguments
    and using -f/--rcfile to source an external hgrc file.

    Use -d/--default to disable current configuration.

    See :hg:`help templates` for information on templates and filters.
    """

    def demoitems(section, items):
        ui.write(b'[%s]\n' % section)
        for k, v in sorted(items):
            if isinstance(v, bool):
                v = stringutil.pprint(v)
            ui.write(b'%s = %s\n' % (k, v))

    fn = b'demo.txt'
    tmpdir = pycompat.mkdtemp(b'', b'kwdemo.')
    ui.note(_(b'creating temporary repository at %s\n') % tmpdir)
    if repo is None:
        baseui = ui
    else:
        baseui = repo.baseui
    repo = localrepo.instance(baseui, tmpdir, create=True)
    ui.setconfig(b'keyword', fn, b'', b'keyword')
    svn = ui.configbool(b'keywordset', b'svn')
    # explicitly set keywordset for demo output
    ui.setconfig(b'keywordset', b'svn', svn, b'keyword')

    uikwmaps = ui.configitems(b'keywordmaps')
    if args or opts.get('rcfile'):
        ui.status(_(b'\n\tconfiguration using custom keyword template maps\n'))
        if uikwmaps:
            ui.status(_(b'\textending current template maps\n'))
        if opts.get('default') or not uikwmaps:
            if svn:
                ui.status(_(b'\toverriding default svn keywordset\n'))
            else:
                ui.status(_(b'\toverriding default cvs keywordset\n'))
        if opts.get('rcfile'):
            ui.readconfig(opts.get(b'rcfile'))
        if args:
            # simulate hgrc parsing
            rcmaps = b'[keywordmaps]\n%s\n' % b'\n'.join(args)
            repo.vfs.write(b'hgrc', rcmaps)
            ui.readconfig(repo.vfs.join(b'hgrc'))
        kwmaps = dict(ui.configitems(b'keywordmaps'))
    elif opts.get('default'):
        if svn:
            ui.status(_(b'\n\tconfiguration using default svn keywordset\n'))
        else:
            ui.status(_(b'\n\tconfiguration using default cvs keywordset\n'))
        kwmaps = _defaultkwmaps(ui)
        if uikwmaps:
            ui.status(_(b'\tdisabling current template maps\n'))
            for k, v in kwmaps.items():
                ui.setconfig(b'keywordmaps', k, v, b'keyword')
    else:
        ui.status(_(b'\n\tconfiguration using current keyword template maps\n'))
        if uikwmaps:
            kwmaps = dict(uikwmaps)
        else:
            kwmaps = _defaultkwmaps(ui)

    uisetup(ui)
    reposetup(ui, repo)
    ui.writenoi18n(b'[extensions]\nkeyword =\n')
    demoitems(b'keyword', ui.configitems(b'keyword'))
    demoitems(b'keywordset', ui.configitems(b'keywordset'))
    demoitems(b'keywordmaps', kwmaps.items())
    keywords = b'$' + b'$\n$'.join(sorted(kwmaps.keys())) + b'$\n'
    repo.wvfs.write(fn, keywords)
    with repo.wlock():
        with repo.dirstate.changing_files(repo):
            repo[None].add([fn])
        ui.note(_(b'\nkeywords written to %s:\n') % fn)
        ui.note(keywords)
        repo.dirstate.setbranch(b'demobranch', repo.currenttransaction())
    for name, cmd in ui.configitems(b'hooks'):
        if name.split(b'.', 1)[0].find(b'commit') > -1:
            repo.ui.setconfig(b'hooks', name, b'', b'keyword')
    msg = _(b'hg keyword configuration and expansion example')
    ui.note((b"hg ci -m '%s'\n" % msg))
    repo.commit(text=msg)
    ui.status(_(b'\n\tkeywords expanded\n'))
    ui.write(repo.wread(fn))
    repo.wvfs.rmtree(repo.root)


@command(
    b'kwexpand',
    cmdutil.walkopts,
    _(b'hg kwexpand [OPTION]... [FILE]...'),
    inferrepo=True,
)
def expand(ui, repo, *pats, **opts):
    """expand keywords in the working directory

    Run after (re)enabling keyword expansion.

    kwexpand refuses to run if given files contain local changes.
    """
    # 3rd argument sets expansion to True
    _kwfwrite(ui, repo, True, *pats, **opts)


@command(
    b'kwfiles',
    [
        (b'A', b'all', None, _(b'show keyword status flags of all files')),
        (b'i', b'ignore', None, _(b'show files excluded from expansion')),
        (b'u', b'unknown', None, _(b'only show unknown (not tracked) files')),
    ]
    + cmdutil.walkopts,
    _(b'hg kwfiles [OPTION]... [FILE]...'),
    inferrepo=True,
)
def files(ui, repo, *pats, **opts):
    """show files configured for keyword expansion

    List which files in the working directory are matched by the
    [keyword] configuration patterns.

    Useful to prevent inadvertent keyword expansion and to speed up
    execution by including only files that are actual candidates for
    expansion.

    See :hg:`help keyword` on how to construct patterns both for
    inclusion and exclusion of files.

    With -A/--all and -v/--verbose the codes used to show the status
    of files are::

      K = keyword expansion candidate
      k = keyword expansion candidate (not tracked)
      I = ignored
      i = ignored (not tracked)
    """
    kwt = getattr(repo, '_keywordkwt', None)
    wctx = repo[None]
    status = _status(ui, repo, wctx, kwt, *pats, **opts)
    if pats:
        cwd = repo.getcwd()
    else:
        cwd = b''
    files = []

    if not opts.get('unknown') or opts.get('all'):
        files = sorted(status.modified + status.added + status.clean)
    kwfiles = kwt.iskwfile(files, wctx)
    kwdeleted = kwt.iskwfile(status.deleted, wctx)
    kwunknown = kwt.iskwfile(status.unknown, wctx)
    if not opts.get('ignore') or opts.get('all'):
        showfiles = kwfiles, kwdeleted, kwunknown
    else:
        showfiles = [], [], []
    if opts.get('all') or opts.get('ignore'):
        showfiles += (
            [f for f in files if f not in kwfiles],
            [f for f in status.unknown if f not in kwunknown],
        )
    kwlabels = b'enabled deleted enabledunknown ignored ignoredunknown'.split()
    kwstates = zip(kwlabels, pycompat.bytestr(b'K!kIi'), showfiles)
    fm = ui.formatter(b'kwfiles', pycompat.byteskwargs(opts))
    fmt = b'%.0s%s\n'
    if opts.get('all') or ui.verbose:
        fmt = b'%s %s\n'
    for kwstate, char, filenames in kwstates:
        label = b'kwfiles.' + kwstate
        for f in filenames:
            fm.startitem()
            fm.data(kwstatus=char, path=f)
            fm.plain(fmt % (char, repo.pathto(f, cwd)), label=label)
    fm.end()


@command(
    b'kwshrink',
    cmdutil.walkopts,
    _(b'hg kwshrink [OPTION]... [FILE]...'),
    inferrepo=True,
)
def shrink(ui, repo, *pats, **opts):
    """revert expanded keywords in the working directory

    Must be run before changing/disabling active keywords.

    kwshrink refuses to run if given files contain local changes.
    """
    # 3rd argument sets expansion to False
    _kwfwrite(ui, repo, False, *pats, **opts)


# monkeypatches


def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
    """Monkeypatch/wrap patch.patchfile.__init__ to avoid
    rejects or conflicts due to expanded keywords in working dir."""
    orig(self, ui, gp, backend, store, eolmode)
    kwt = getattr(getattr(backend, 'repo', None), '_keywordkwt', None)
    if kwt:
        # shrink keywords read from working dir
        self.lines = kwt.shrinklines(self.fname, self.lines)


def kwdiff(orig, repo, *args, **kwargs):
    '''Monkeypatch patch.diff to avoid expansion.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt:
        restrict = kwt.restrict
        kwt.restrict = True
    try:
        for chunk in orig(repo, *args, **kwargs):
            yield chunk
    finally:
        if kwt:
            kwt.restrict = restrict


def kwweb_skip(orig, web):
    '''Wraps webcommands.x turning off keyword expansion.'''
    kwt = getattr(web.repo, '_keywordkwt', None)
    if kwt:
        origmatch = kwt.match
        kwt.match = util.never
    try:
        for chunk in orig(web):
            yield chunk
    finally:
        if kwt:
            kwt.match = origmatch


def kw_amend(orig, ui, repo, old, extra, pats, opts):
    '''Wraps cmdutil.amend expanding keywords after amend.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, old, extra, pats, opts)
    with repo.wlock(), repo.dirstate.changing_parents(repo):
        kwt.postcommit = True
        newid = orig(ui, repo, old, extra, pats, opts)
        if newid != old.node():
            ctx = repo[newid]
            kwt.restrict = True
            kwt.overwrite(ctx, ctx.files(), False, True)
            kwt.restrict = False
        return newid


def kw_copy(orig, ui, repo, pats, opts, rename=False):
    """Wraps cmdutil.copy so that copy/rename destinations do not
    contain expanded keywords.
    Note that the source of a regular file destination may also be a
    symlink:
    hg cp sym x                -> x is symlink
    cp sym x; hg cp -A sym x   -> x is file (maybe expanded keywords)
    For the latter we have to follow the symlink to find out whether its
    target is configured for expansion and we therefore must unexpand the
    keywords in the destination."""
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, pats, opts, rename)
    with repo.wlock():
        orig(ui, repo, pats, opts, rename)
        if opts.get(b'dry_run'):
            return
        wctx = repo[None]
        cwd = repo.getcwd()

        def haskwsource(dest):
            """Returns true if dest is a regular file and configured for
            expansion or a symlink which points to a file configured for
            expansion."""
            source = repo.dirstate.copied(dest)
            if b'l' in wctx.flags(source):
                source = pathutil.canonpath(
                    repo.root, cwd, os.path.realpath(source)
                )
            return kwt.match(source)

        candidates = [
            f
            for f in repo.dirstate.copies()
            if b'l' not in wctx.flags(f) and haskwsource(f)
        ]
        kwt.overwrite(wctx, candidates, False, False)


def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
    '''Wraps record.dorecord expanding keywords after recording.'''
    kwt = getattr(repo, '_keywordkwt', None)
    if kwt is None:
        return orig(ui, repo, commitfunc, *pats, **opts)
    with repo.wlock():
        # record returns 0 even when nothing has changed
        # therefore compare nodes before and after
        kwt.postcommit = True
        ctx = repo[b'.']
        wstatus = ctx.status()
        ret = orig(ui, repo, commitfunc, *pats, **opts)
        recctx = repo[b'.']
        if ctx != recctx:
            modified, added = _preselect(wstatus, recctx.files())
            kwt.restrict = False
            with repo.dirstate.changing_parents(repo):
                kwt.overwrite(recctx, modified, False, True)
                kwt.overwrite(recctx, added, False, True, True)
            kwt.restrict = True
        return ret


def kwfilectx_cmp(orig, self, fctx):
    if fctx._customcmp:
        return fctx.cmp(self)
    kwt = getattr(self._repo, '_keywordkwt', None)
    if kwt is None:
        return orig(self, fctx)
    # keyword affects data size, comparing wdir and filelog size does
    # not make sense
    if (
        fctx._filenode is None
        and (
            self._repo._encodefilterpats
            or kwt.match(fctx.path())
            and b'l' not in fctx.flags()
            or self.size() - 4 == fctx.size()
        )
        or self.size() == fctx.size()
    ):
        return self._filelog.cmp(self._filenode, fctx.data())
    return True


def uisetup(ui):
    """Monkeypatches dispatch._parse to retrieve user command.
    Overrides file method to return kwfilelog instead of filelog
    if file matches user configuration.
    Wraps commit to overwrite configured files with updated
    keyword substitutions.
    Monkeypatches patch and webcommands."""

    def kwdispatch_parse(orig, ui, args):
        '''Monkeypatch dispatch._parse to obtain running hg command.'''
        cmd, func, args, options, cmdoptions = orig(ui, args)
        kwtools[b'hgcmd'] = cmd
        return cmd, func, args, options, cmdoptions

    extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)

    extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
    extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
    extensions.wrapfunction(patch, 'diff', kwdiff)
    extensions.wrapfunction(cmdutil, 'amend', kw_amend)
    extensions.wrapfunction(cmdutil, 'copy', kw_copy)
    extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
    for c in nokwwebcommands.split():
        extensions.wrapfunction(webcommands, c, kwweb_skip)

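# A minimal sketch (not Mercurial's actual implementation) of the
# wrapfunction pattern used by uisetup() above: the wrapper receives the
# original callable as its first argument.  All names below are illustrative.
def _wrapfunction_sketch():
    def wrapfunction(container, name, wrapper):
        origfn = getattr(container, name)

        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)  # orig is passed first

        setattr(container, name, wrapped)
        return origfn

    class mod:  # stands in for a module such as patch or cmdutil
        @staticmethod
        def greet(who):
            return 'hello %s' % who

    def shouting(orig, who):
        return orig(who).upper()

    wrapfunction(mod, 'greet', shouting)
    assert mod.greet('keyword') == 'HELLO KEYWORD'
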
def reposetup(ui, repo):
    '''Sets up repo as kwrepo for keyword substitution.'''

    try:
        if (
            not repo.local()
            or kwtools[b'hgcmd'] in nokwcommands.split()
            or b'.hg' in util.splitpath(repo.root)
            or repo._url.startswith(b'bundle:')
        ):
            return
    except AttributeError:
        pass

    inc, exc = [], [b'.hg*']
    for pat, opt in ui.configitems(b'keyword'):
        if opt != b'ignore':
            inc.append(pat)
        else:
            exc.append(pat)
    if not inc:
        return

    kwt = kwtemplater(ui, repo, inc, exc)

    class kwrepo(repo.__class__):
        def file(self, f):
            if f[0] == b'/':
                f = f[1:]
            return kwfilelog(self.svfs, kwt, f)

        def wread(self, filename):
            data = super(kwrepo, self).wread(filename)
            return kwt.wread(filename, data)

        def commit(self, *args, **opts):
            # use custom commitctx for user commands
            # other extensions can still wrap repo.commitctx directly
            self.commitctx = self.kwcommitctx
            try:
                return super(kwrepo, self).commit(*args, **opts)
            finally:
                del self.commitctx

        def kwcommitctx(self, ctx, error=False, origctx=None):
            n = super(kwrepo, self).commitctx(ctx, error, origctx)
            # no lock needed, only called from repo.commit() which already locks
            if not kwt.postcommit:
                restrict = kwt.restrict
                kwt.restrict = True
                kwt.overwrite(
                    self[n], sorted(ctx.added() + ctx.modified()), False, True
                )
                kwt.restrict = restrict
            return n

        def rollback(self, dryrun=False, force=False):
            with self.wlock():
                origrestrict = kwt.restrict
                try:
                    if not dryrun:
                        changed = self[b'.'].files()
                    ret = super(kwrepo, self).rollback(dryrun, force)
                    if not dryrun:
                        ctx = self[b'.']
                        modified, added = _preselect(ctx.status(), changed)
                        kwt.restrict = False
                        kwt.overwrite(ctx, modified, True, True)
                        kwt.overwrite(ctx, added, True, False)
                    return ret
                finally:
                    kwt.restrict = origrestrict

    repo.__class__ = kwrepo
    repo._keywordkwt = kwt
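# A toy sketch (not part of the extension) of the class-swapping trick that
# reposetup() uses: `repo.__class__ = kwrepo` upgrades one live instance
# without affecting other repositories.  Names are illustrative.
def _classswap_sketch():
    class base:
        def read(self):
            return b'data'

    obj = base()

    class wrapper(obj.__class__):  # subclass the instance's own class
        def read(self):
            return super(wrapper, self).read() + b' (filtered)'

    obj.__class__ = wrapper  # only this instance gains the override
    assert obj.read() == b'data (filtered)'
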
@@ -1,96 +1,95 @@
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import re

from mercurial.i18n import _
-from mercurial.pycompat import getattr
from mercurial import (
    error,
    hg,
    util,
)
from mercurial.utils import (
    urlutil,
)

from . import (
    lfutil,
    localstore,
    wirestore,
)


# During clone this function is passed the src's ui object
# but it needs the dest's ui object so it can read out of
# the config file. Use repo.ui instead.
def openstore(repo=None, remote=None, put=False, ui=None):
    if ui is None:
        ui = repo.ui

    if not remote:
        lfpullsource = getattr(repo, 'lfpullsource', None)
        if put:
            path = urlutil.get_unique_push_path(
                b'lfpullsource', repo, ui, lfpullsource
            )
        else:
            path = urlutil.get_unique_pull_path_obj(
                b'lfpullsource', ui, lfpullsource
            )

        # XXX we should not explicitly pass b'default', as this will result in
        # b'default' being returned if no `paths.default` was defined. We
        # should explicitly handle the lack of value instead.
        if repo is None:
            path = urlutil.get_unique_pull_path_obj(
                b'lfs',
                ui,
                b'default',
            )
            remote = hg.peer(repo or ui, {}, path)
        elif path.loc == b'default-push' or path.loc == b'default':
            remote = repo
        else:
            remote = hg.peer(repo or ui, {}, path)

    # The path could be a scheme so use Mercurial's normal functionality
    # to resolve the scheme to a repository and use its path
    path = hasattr(remote, 'url') and remote.url() or remote.path

    match = _scheme_re.match(path)
    if not match:  # regular filesystem path
        scheme = b'file'
    else:
        scheme = match.group(1)

    try:
        storeproviders = _storeprovider[scheme]
    except KeyError:
        raise error.Abort(_(b'unsupported URL scheme %r') % scheme)

    for classobj in storeproviders:
        try:
            return classobj(ui, repo, remote)
        except lfutil.storeprotonotcapable:
            pass

    raise error.Abort(
        _(b'%s does not appear to be a largefile store')
        % urlutil.hidepassword(path)
    )


_storeprovider = {
    b'file': [localstore.localstore],
    b'http': [wirestore.wirestore],
    b'https': [wirestore.wirestore],
    b'ssh': [wirestore.wirestore],
}

_scheme_re = re.compile(br'^([a-zA-Z0-9+-.]+)://')


def getlfile(ui, hash):
    return util.chunkbuffer(openstore(ui=ui)._get(hash))
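# A small sketch (not part of this module) of the pre-ternary `A and B or C`
# idiom on the `path = hasattr(remote, 'url') and ...` line above, including
# the pitfall that the modern conditional expression avoids.  `peer` is a
# hypothetical stand-in for a remote with a url() method.
def _andor_sketch():
    class peer:
        def url(self):
            return b'https://example.com/repo'

    remote = peer()
    # remote.path is never evaluated here: url() returned a truthy value
    path = hasattr(remote, 'url') and remote.url() or remote.path
    assert path == b'https://example.com/repo'
    # pitfall: if url() could return a falsy value such as b'', the or-branch
    # would wrongly win; `remote.url() if hasattr(remote, 'url') else
    # remote.path` has no such edge case.
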
@@ -1,789 +1,788 b''
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
1 # blobstore.py - local and remote (speaking Git-LFS protocol) blob storages
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import contextlib
9 import contextlib
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import json
12 import json
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16
16
17 from mercurial.i18n import _
17 from mercurial.i18n import _
18 from mercurial.pycompat import getattr
19 from mercurial.node import hex
18 from mercurial.node import hex
20
19
21 from mercurial import (
20 from mercurial import (
22 encoding,
21 encoding,
23 error,
22 error,
24 httpconnection as httpconnectionmod,
23 httpconnection as httpconnectionmod,
25 pathutil,
24 pathutil,
26 pycompat,
25 pycompat,
27 url as urlmod,
26 url as urlmod,
28 util,
27 util,
29 vfs as vfsmod,
28 vfs as vfsmod,
30 worker,
29 worker,
31 )
30 )
32
31
33 from mercurial.utils import (
32 from mercurial.utils import (
34 stringutil,
33 stringutil,
35 urlutil,
34 urlutil,
36 )
35 )
37
36
38 from ..largefiles import lfutil
37 from ..largefiles import lfutil
39
38
40 # 64 bytes for SHA256
39 # 64 bytes for SHA256
41 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
40 _lfsre = re.compile(br'\A[a-f0-9]{64}\Z')
42
41
43
42
44 class lfsvfs(vfsmod.vfs):
43 class lfsvfs(vfsmod.vfs):
45 def join(self, path):
44 def join(self, path):
46 """split the path at first two characters, like: XX/XXXXX..."""
45 """split the path at first two characters, like: XX/XXXXX..."""
47 if not _lfsre.match(path):
46 if not _lfsre.match(path):
48 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
47 raise error.ProgrammingError(b'unexpected lfs path: %s' % path)
49 return super(lfsvfs, self).join(path[0:2], path[2:])
48 return super(lfsvfs, self).join(path[0:2], path[2:])
50
49
51 def walk(self, path=None, onerror=None):
50 def walk(self, path=None, onerror=None):
52 """Yield (dirpath, [], oids) tuple for blobs under path
51 """Yield (dirpath, [], oids) tuple for blobs under path
53
52
54 Oids only exist in the root of this vfs, so dirpath is always ''.
53 Oids only exist in the root of this vfs, so dirpath is always ''.
55 """
54 """
56 root = os.path.normpath(self.base)
55 root = os.path.normpath(self.base)
57 # when dirpath == root, dirpath[prefixlen:] becomes empty
56 # when dirpath == root, dirpath[prefixlen:] becomes empty
58 # because len(dirpath) < prefixlen.
57 # because len(dirpath) < prefixlen.
59 prefixlen = len(pathutil.normasprefix(root))
58 prefixlen = len(pathutil.normasprefix(root))
60 oids = []
59 oids = []
61
60
62 for dirpath, dirs, files in os.walk(
61 for dirpath, dirs, files in os.walk(
63 self.reljoin(self.base, path or b''), onerror=onerror
62 self.reljoin(self.base, path or b''), onerror=onerror
64 ):
63 ):
65 dirpath = dirpath[prefixlen:]
64 dirpath = dirpath[prefixlen:]
66
65
67 # Silently skip unexpected files and directories
66 # Silently skip unexpected files and directories
68 if len(dirpath) == 2:
67 if len(dirpath) == 2:
69 oids.extend(
68 oids.extend(
70 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
69 [dirpath + f for f in files if _lfsre.match(dirpath + f)]
71 )
70 )
72
71
73 yield (b'', [], oids)
72 yield (b'', [], oids)
74
73
75
74
76 class nullvfs(lfsvfs):
75 class nullvfs(lfsvfs):
77 def __init__(self):
76 def __init__(self):
78 pass
77 pass
79
78
80 def exists(self, oid):
79 def exists(self, oid):
81 return False
80 return False
82
81
83 def read(self, oid):
82 def read(self, oid):
84 # store.read() calls into here if the blob doesn't exist in its
83 # store.read() calls into here if the blob doesn't exist in its
85 # self.vfs. Raise the same error as a normal vfs when asked to read a
84 # self.vfs. Raise the same error as a normal vfs when asked to read a
86 # file that doesn't exist. The only difference is the full file path
85 # file that doesn't exist. The only difference is the full file path
87 # isn't available in the error.
86 # isn't available in the error.
88 raise IOError(
87 raise IOError(
89 errno.ENOENT,
88 errno.ENOENT,
90 pycompat.sysstr(b'%s: No such file or directory' % oid),
89 pycompat.sysstr(b'%s: No such file or directory' % oid),
91 )
90 )
92
91
93 def walk(self, path=None, onerror=None):
92 def walk(self, path=None, onerror=None):
94 return (b'', [], [])
93 return (b'', [], [])
95
94
96 def write(self, oid, data):
95 def write(self, oid, data):
97 pass
96 pass
98
97
99
98
100 class lfsuploadfile(httpconnectionmod.httpsendfile):
99 class lfsuploadfile(httpconnectionmod.httpsendfile):
101 """a file-like object that supports keepalive."""
100 """a file-like object that supports keepalive."""
102
101
103 def __init__(self, ui, filename):
102 def __init__(self, ui, filename):
104 super(lfsuploadfile, self).__init__(ui, filename, b'rb')
103 super(lfsuploadfile, self).__init__(ui, filename, b'rb')
105 self.read = self._data.read
104 self.read = self._data.read
106
105
107 def _makeprogress(self):
106 def _makeprogress(self):
108 return None # progress is handled by the worker client
107 return None # progress is handled by the worker client
109
108
110
109
111 class local:
110 class local:
112 """Local blobstore for large file contents.
111 """Local blobstore for large file contents.
113
112
114 This blobstore is used both as a cache and as a staging area for large blobs
113 This blobstore is used both as a cache and as a staging area for large blobs
115 to be uploaded to the remote blobstore.
114 to be uploaded to the remote blobstore.
116 """
115 """
117
116
118 def __init__(self, repo):
117 def __init__(self, repo):
119 fullpath = repo.svfs.join(b'lfs/objects')
118 fullpath = repo.svfs.join(b'lfs/objects')
120 self.vfs = lfsvfs(fullpath)
119 self.vfs = lfsvfs(fullpath)
121
120
122 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
121 if repo.ui.configbool(b'experimental', b'lfs.disableusercache'):
123 self.cachevfs = nullvfs()
122 self.cachevfs = nullvfs()
124 else:
123 else:
125 usercache = lfutil._usercachedir(repo.ui, b'lfs')
124 usercache = lfutil._usercachedir(repo.ui, b'lfs')
126 self.cachevfs = lfsvfs(usercache)
125 self.cachevfs = lfsvfs(usercache)
127 self.ui = repo.ui
126 self.ui = repo.ui
128
127
129 def open(self, oid):
128 def open(self, oid):
130 """Open a read-only file descriptor to the named blob, in either the
129 """Open a read-only file descriptor to the named blob, in either the
131 usercache or the local store."""
130 usercache or the local store."""
132 return open(self.path(oid), 'rb')
131 return open(self.path(oid), 'rb')
133
132
134 def path(self, oid):
133 def path(self, oid):
135 """Build the path for the given blob ``oid``.
134 """Build the path for the given blob ``oid``.
136
135
137 If the blob exists locally, the path may point to either the usercache
136 If the blob exists locally, the path may point to either the usercache
138 or the local store. If it doesn't, it will point to the local store.
137 or the local store. If it doesn't, it will point to the local store.
139 This is meant for situations where existing code that isn't LFS aware
138 This is meant for situations where existing code that isn't LFS aware
140 needs to open a blob. Generally, prefer the ``open`` method on this
139 needs to open a blob. Generally, prefer the ``open`` method on this
141 class.
140 class.
142 """
141 """
143 # The usercache is the most likely place to hold the file. Commit will
142 # The usercache is the most likely place to hold the file. Commit will
144 # write to both it and the local store, as will anything that downloads
143 # write to both it and the local store, as will anything that downloads
145 # the blobs. However, things like clone without an update won't
144 # the blobs. However, things like clone without an update won't
146 # populate the local store. For an init + push of a local clone,
145 # populate the local store. For an init + push of a local clone,
147 # the usercache is the only place it _could_ be. If not present, the
146 # the usercache is the only place it _could_ be. If not present, the
148 # missing file msg here will indicate the local repo, not the usercache.
147 # missing file msg here will indicate the local repo, not the usercache.
149 if self.cachevfs.exists(oid):
148 if self.cachevfs.exists(oid):
150 return self.cachevfs.join(oid)
149 return self.cachevfs.join(oid)
151
150
152 return self.vfs.join(oid)
151 return self.vfs.join(oid)
153
152
154 def download(self, oid, src, content_length):
153 def download(self, oid, src, content_length):
155 """Read the blob from the remote source in chunks, verify the content,
154 """Read the blob from the remote source in chunks, verify the content,
156 and write to this local blobstore."""
155 and write to this local blobstore."""
157 sha256 = hashlib.sha256()
156 sha256 = hashlib.sha256()
158 size = 0
157 size = 0
159
158
160 with self.vfs(oid, b'wb', atomictemp=True) as fp:
159 with self.vfs(oid, b'wb', atomictemp=True) as fp:
161 for chunk in util.filechunkiter(src, size=1048576):
160 for chunk in util.filechunkiter(src, size=1048576):
162 fp.write(chunk)
161 fp.write(chunk)
163 sha256.update(chunk)
162 sha256.update(chunk)
164 size += len(chunk)
163 size += len(chunk)
165
164
166 # If the server advertised a length longer than what we actually
165 # If the server advertised a length longer than what we actually
167 # received, then we should expect that the server crashed while
166 # received, then we should expect that the server crashed while
168 # producing the response (but the server has no way of telling us
167 # producing the response (but the server has no way of telling us
169 # that), and we really don't need to try to write the response to
168 # that), and we really don't need to try to write the response to
170 # the localstore, because it's not going to match the expected.
169 # the localstore, because it's not going to match the expected.
171 # The server also uses this method to store data uploaded by the
170 # The server also uses this method to store data uploaded by the
172 # client, so if this happens on the server side, it's possible
171 # client, so if this happens on the server side, it's possible
173 # that the client crashed or an antivirus interfered with the
172 # that the client crashed or an antivirus interfered with the
174 # upload.
173 # upload.
175 if content_length is not None and int(content_length) != size:
174 if content_length is not None and int(content_length) != size:
176 msg = (
175 msg = (
177 b"Response length (%d) does not match Content-Length "
176 b"Response length (%d) does not match Content-Length "
178 b"header (%d) for %s"
177 b"header (%d) for %s"
179 )
178 )
180 raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
179 raise LfsRemoteError(_(msg) % (size, int(content_length), oid))
181
180
182 realoid = hex(sha256.digest())
181 realoid = hex(sha256.digest())
183 if realoid != oid:
182 if realoid != oid:
184 raise LfsCorruptionError(
183 raise LfsCorruptionError(
185 _(b'corrupt remote lfs object: %s') % oid
184 _(b'corrupt remote lfs object: %s') % oid
186 )
185 )
187
186
188 self._linktousercache(oid)
187 self._linktousercache(oid)
189
188
190 def write(self, oid, data):
189 def write(self, oid, data):
191 """Write blob to local blobstore.
190 """Write blob to local blobstore.
192
191
193 This should only be called from the filelog during a commit or similar.
192 This should only be called from the filelog during a commit or similar.
194 As such, there is no need to verify the data. Imports from a remote
193 As such, there is no need to verify the data. Imports from a remote
195 store must use ``download()`` instead."""
194 store must use ``download()`` instead."""
196 with self.vfs(oid, b'wb', atomictemp=True) as fp:
195 with self.vfs(oid, b'wb', atomictemp=True) as fp:
197 fp.write(data)
196 fp.write(data)
198
197
199 self._linktousercache(oid)
198 self._linktousercache(oid)
200
199
201 def linkfromusercache(self, oid):
200 def linkfromusercache(self, oid):
202 """Link blobs found in the user cache into this store.
201 """Link blobs found in the user cache into this store.
203
202
204 The server module needs to do this when it lets the client know not to
203 The server module needs to do this when it lets the client know not to
205 upload the blob, to ensure it is always available in this store.
204 upload the blob, to ensure it is always available in this store.
206 Normally this is done implicitly when the client reads or writes the
205 Normally this is done implicitly when the client reads or writes the
        blob, but that doesn't happen when the server tells the client that it
        already has the blob.
        """
        if not isinstance(self.cachevfs, nullvfs) and not self.vfs.exists(oid):
            self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
            lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))

    def _linktousercache(self, oid):
        # XXX: should we verify the content of the cache, and hardlink back to
        # the local store on success, but truncate, write and link on failure?
        if not self.cachevfs.exists(oid) and not isinstance(
            self.cachevfs, nullvfs
        ):
            self.ui.note(_(b'lfs: adding %s to the usercache\n') % oid)
            lfutil.link(self.vfs.join(oid), self.cachevfs.join(oid))

    def read(self, oid, verify=True):
        """Read blob from local blobstore."""
        if not self.vfs.exists(oid):
            blob = self._read(self.cachevfs, oid, verify)

            # Even if revlog will verify the content, it needs to be verified
            # now before making the hardlink to avoid propagating corrupt blobs.
            # Don't abort if corruption is detected, because `hg verify` will
            # give more useful info about the corruption- simply don't add the
            # hardlink.
            if verify or hex(hashlib.sha256(blob).digest()) == oid:
                self.ui.note(_(b'lfs: found %s in the usercache\n') % oid)
                lfutil.link(self.cachevfs.join(oid), self.vfs.join(oid))
        else:
            self.ui.note(_(b'lfs: found %s in the local lfs store\n') % oid)
            blob = self._read(self.vfs, oid, verify)
        return blob

    def _read(self, vfs, oid, verify):
        """Read blob (after verifying) from the given store"""
        blob = vfs.read(oid)
        if verify:
            _verify(oid, blob)
        return blob

    def verify(self, oid):
        """Indicate whether or not the hash of the underlying file matches its
        name."""
        sha256 = hashlib.sha256()

        with self.open(oid) as fp:
            for chunk in util.filechunkiter(fp, size=1048576):
                sha256.update(chunk)

        return oid == hex(sha256.digest())

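    # Illustrative sketch (not part of the original module): verifying a blob
    # by streaming it through sha256 in 1 MiB chunks, mirroring verify() above.
    # `path` and `expected_oid` are hypothetical names.
    #
    #   import hashlib
    #   h = hashlib.sha256()
    #   with open(path, 'rb') as fp:
    #       for chunk in iter(lambda: fp.read(1048576), b''):
    #           h.update(chunk)
    #   assert h.hexdigest() == expected_oid
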
    def has(self, oid):
        """Returns True if the local blobstore contains the requested blob,
        False otherwise."""
        return self.cachevfs.exists(oid) or self.vfs.exists(oid)


def _urlerrorreason(urlerror):
    """Create a friendly message for the given URLError to be used in an
    LfsRemoteError message.
    """
    inst = urlerror

    if isinstance(urlerror.reason, Exception):
        inst = urlerror.reason

    if hasattr(inst, 'reason'):
        try:  # usually it is in the form (errno, strerror)
            reason = inst.reason.args[1]
        except (AttributeError, IndexError):
            # it might be anything, for example a string
            reason = inst.reason
        if isinstance(reason, str):
            # SSLError of Python 2.7.9 contains a unicode
            reason = encoding.unitolocal(reason)
        return reason
    elif getattr(inst, "strerror", None):
        return encoding.strtolocal(inst.strerror)
    else:
        return stringutil.forcebytestr(urlerror)


class lfsauthhandler(util.urlreq.basehandler):
    handler_order = 480  # Before HTTPDigestAuthHandler (== 490)

    def http_error_401(self, req, fp, code, msg, headers):
        """Enforces that any authentication performed is HTTP Basic
        Authentication. No authentication is also acceptable.
        """
        authreq = headers.get('www-authenticate', None)
        if authreq:
            scheme = authreq.split()[0]

            if scheme.lower() != 'basic':
                msg = _(b'the server must support Basic Authentication')
                raise util.urlerr.httperror(
                    req.get_full_url(),
                    code,
                    encoding.strfromlocal(msg),
                    headers,
                    fp,
                )
        return None


class _gitlfsremote:
    def __init__(self, repo, url):
        ui = repo.ui
        self.ui = ui
        baseurl, authinfo = url.authinfo()
        self.baseurl = baseurl.rstrip(b'/')
        useragent = repo.ui.config(b'experimental', b'lfs.user-agent')
        if not useragent:
            useragent = b'git-lfs/2.3.4 (Mercurial %s)' % util.version()
        self.urlopener = urlmod.opener(ui, authinfo, useragent)
        self.urlopener.add_handler(lfsauthhandler())
        self.retry = ui.configint(b'lfs', b'retry')

    def writebatch(self, pointers, fromstore):
        """Batch upload from local to remote blobstore."""
        self._batch(_deduplicate(pointers), fromstore, b'upload')

    def readbatch(self, pointers, tostore):
        """Batch download from remote to local blobstore."""
        self._batch(_deduplicate(pointers), tostore, b'download')

    def _batchrequest(self, pointers, action):
        """Get metadata about objects pointed by pointers for given action

        Return decoded JSON object like {'objects': [{'oid': '', 'size': 1}]}
        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/batch.md
        """
        objects = [
            {'oid': pycompat.strurl(p.oid()), 'size': p.size()}
            for p in pointers
        ]
        requestdata = pycompat.bytesurl(
            json.dumps(
                {
                    'objects': objects,
                    'operation': pycompat.strurl(action),
                }
            )
        )
        url = b'%s/objects/batch' % self.baseurl
        batchreq = util.urlreq.request(pycompat.strurl(url), data=requestdata)
        batchreq.add_header('Accept', 'application/vnd.git-lfs+json')
        batchreq.add_header('Content-Type', 'application/vnd.git-lfs+json')
        try:
            with contextlib.closing(self.urlopener.open(batchreq)) as rsp:
                rawjson = rsp.read()
        except util.urlerr.httperror as ex:
            hints = {
                400: _(
                    b'check that lfs serving is enabled on %s and "%s" is '
                    b'supported'
                )
                % (self.baseurl, action),
                404: _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl,
            }
            hint = hints.get(ex.code, _(b'api=%s, action=%s') % (url, action))
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s') % stringutil.forcebytestr(ex),
                hint=hint,
            )
        except util.urlerr.urlerror as ex:
            hint = (
                _(b'the "lfs.url" config may be used to override %s')
                % self.baseurl
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        try:
            response = pycompat.json_loads(rawjson)
        except ValueError:
            # rawjson is already bytes, so it can be interpolated directly
            raise LfsRemoteError(
                _(b'LFS server returns invalid JSON: %s') % rawjson
            )

        if self.ui.debugflag:
            self.ui.debug(b'Status: %d\n' % rsp.status)
            # lfs-test-server and hg serve return headers in different order
            headers = pycompat.bytestr(rsp.info()).strip()
            self.ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

            if 'objects' in response:
                response['objects'] = sorted(
                    response['objects'], key=lambda p: p['oid']
                )
            self.ui.debug(
                b'%s\n'
                % pycompat.bytesurl(
                    json.dumps(
                        response,
                        indent=2,
                        separators=('', ': '),
                        sort_keys=True,
                    )
                )
            )

        def encodestr(x):
            if isinstance(x, str):
                return x.encode('utf-8')
            return x

        return pycompat.rapply(encodestr, response)

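    # Illustrative batch API exchange handled by _batchrequest() above; shapes
    # follow the git-lfs batch.md spec linked in its docstring, and all values
    # are hypothetical:
    #
    #   request:  {"operation": "download",
    #              "objects": [{"oid": "31cf...", "size": 12}]}
    #   response: {"objects": [{"oid": "31cf...", "size": 12,
    #               "actions": {"download": {
    #                   "href": "https://lfs.example.org/objects/31cf...",
    #                   "header": {"Authorization": "Basic ..."}}}}]}
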
    def _checkforservererror(self, pointers, responses, action):
        """Scans errors from objects

        Raises LfsRemoteError if any objects have an error"""
        for response in responses:
            # The server should return 404 when objects cannot be found. Some
            # server implementations (e.g. lfs-test-server) do not set "error"
            # but just remove "download" from "actions". Treat that case the
            # same as a 404 error.
            if b'error' not in response:
                if action == b'download' and action not in response.get(
                    b'actions', []
                ):
                    code = 404
                else:
                    continue
            else:
                # An error dict without a code doesn't make much sense, so
                # treat as a server error.
                code = response.get(b'error').get(b'code', 500)

            ptrmap = {p.oid(): p for p in pointers}
            p = ptrmap.get(response[b'oid'], None)
            if p:
                filename = getattr(p, 'filename', b'unknown')
                errors = {
                    404: b'The object does not exist',
                    410: b'The object was removed by the owner',
                    422: b'Validation error',
                    500: b'Internal server error',
                }
                msg = errors.get(code, b'status code %d' % code)
                raise LfsRemoteError(
                    _(b'LFS server error for "%s": %s') % (filename, msg)
                )
            else:
                raise LfsRemoteError(
                    _(b'LFS server error. Unsolicited response for oid %s')
                    % response[b'oid']
                )

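    # Illustrative error object as consumed above (shape per the batch API
    # spec; values hypothetical):
    #
    #   {"oid": "31cf...",
    #    "error": {"code": 404, "message": "Object does not exist"}}
    #
    # A download response that merely omits the "download" action is treated
    # the same as an explicit 404.
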
    def _extractobjects(self, response, pointers, action):
        """extract objects from response of the batch API

        response: parsed JSON object returned by batch API
        return response['objects'] filtered by action
        raise if any object has an error
        """
        # Scan errors from objects - fail early
        objects = response.get(b'objects', [])
        self._checkforservererror(pointers, objects, action)

        # Filter objects with given action. Practically, this skips uploading
        # objects which exist in the server.
        filteredobjects = [
            o for o in objects if action in o.get(b'actions', [])
        ]

        return filteredobjects

    def _basictransfer(self, obj, action, localstore):
        """Download or upload a single object using basic transfer protocol

        obj: dict, an object description returned by batch API
        action: string, one of ['upload', 'download']
        localstore: blobstore.local

        See https://github.com/git-lfs/git-lfs/blob/master/docs/api/\
basic-transfers.md
        """
        oid = obj[b'oid']
        href = obj[b'actions'][action].get(b'href')
        headers = obj[b'actions'][action].get(b'header', {}).items()

        request = util.urlreq.request(pycompat.strurl(href))
        if action == b'upload':
            # If uploading blobs, read data from local blobstore.
            if not localstore.verify(oid):
                raise error.Abort(
                    _(b'detected corrupt lfs object: %s') % oid,
                    hint=_(b'run hg verify'),
                )

        for k, v in headers:
            request.add_header(pycompat.strurl(k), pycompat.strurl(v))

        try:
            if action == b'upload':
                request.data = lfsuploadfile(self.ui, localstore.path(oid))
                request.get_method = lambda: 'PUT'
                request.add_header('Content-Type', 'application/octet-stream')
                request.add_header('Content-Length', request.data.length)

            with contextlib.closing(self.urlopener.open(request)) as res:
                contentlength = res.info().get(b"content-length")
                ui = self.ui  # Shorten debug lines
                if self.ui.debugflag:
                    ui.debug(b'Status: %d\n' % res.status)
                    # lfs-test-server and hg serve return headers in different
                    # order
                    headers = pycompat.bytestr(res.info()).strip()
                    ui.debug(b'%s\n' % b'\n'.join(sorted(headers.splitlines())))

                if action == b'download':
                    # If downloading blobs, store downloaded data to local
                    # blobstore
                    localstore.download(oid, res, contentlength)
                else:
                    blocks = []
                    while True:
                        data = res.read(1048576)
                        if not data:
                            break
                        blocks.append(data)

                    response = b"".join(blocks)
                    if response:
                        ui.debug(b'lfs %s response: %s' % (action, response))
        except util.urlerr.httperror as ex:
            if self.ui.debugflag:
                self.ui.debug(
                    b'%s: %s\n' % (oid, ex.read())
                )  # XXX: also bytes?
            raise LfsRemoteError(
                _(b'LFS HTTP error: %s (oid=%s, action=%s)')
                % (stringutil.forcebytestr(ex), oid, action)
            )
        except util.urlerr.urlerror as ex:
            hint = _(b'attempted connection to %s') % pycompat.bytesurl(
                util.urllibcompat.getfullurl(request)
            )
            raise LfsRemoteError(
                _(b'LFS error: %s') % _urlerrorreason(ex), hint=hint
            )
        finally:
            if request.data:
                request.data.close()

    def _batch(self, pointers, localstore, action):
        if action not in [b'upload', b'download']:
            raise error.ProgrammingError(b'invalid Git-LFS action: %s' % action)

        response = self._batchrequest(pointers, action)
        objects = self._extractobjects(response, pointers, action)
        total = sum(x.get(b'size', 0) for x in objects)
        sizes = {}
        for obj in objects:
            sizes[obj.get(b'oid')] = obj.get(b'size', 0)
        topic = {
            b'upload': _(b'lfs uploading'),
            b'download': _(b'lfs downloading'),
        }[action]
        if len(objects) > 1:
            self.ui.note(
                _(b'lfs: need to transfer %d objects (%s)\n')
                % (len(objects), util.bytecount(total))
            )

        def transfer(chunk):
            for obj in chunk:
                objsize = obj.get(b'size', 0)
                if self.ui.verbose:
                    if action == b'download':
                        msg = _(b'lfs: downloading %s (%s)\n')
                    elif action == b'upload':
                        msg = _(b'lfs: uploading %s (%s)\n')
                    self.ui.note(
                        msg % (obj.get(b'oid'), util.bytecount(objsize))
                    )
                retry = self.retry
                while True:
                    try:
                        self._basictransfer(obj, action, localstore)
                        yield 1, obj.get(b'oid')
                        break
                    except socket.error as ex:
                        if retry > 0:
                            self.ui.note(
                                _(b'lfs: failed: %r (remaining retry %d)\n')
                                % (stringutil.forcebytestr(ex), retry)
                            )
                            retry -= 1
                            continue
                        raise

        # Until https multiplexing gets sorted out. It's not clear if
        # ConnectionManager.set_ready() is externally synchronized for thread
        # safety with Windows workers.
        if self.ui.configbool(b'experimental', b'lfs.worker-enable'):
            # The POSIX workers are forks of this process, so before spinning
            # them up, close all pooled connections. Otherwise, there's no way
            # to coordinate between them about who is using what, and the
            # transfers will get corrupted.
            #
            # TODO: add a function to keepalive.ConnectionManager to mark all
            # ready connections as in use, and roll that back after the fork?
            # That would allow the existing pool of connections in this process
            # to be preserved.
            def prefork():
                for h in self.urlopener.handlers:
                    getattr(h, "close_all", lambda: None)()

            oids = worker.worker(
                self.ui,
                0.1,
                transfer,
                (),
                sorted(objects, key=lambda o: o.get(b'oid')),
                prefork=prefork,
            )
        else:
            oids = transfer(sorted(objects, key=lambda o: o.get(b'oid')))

        with self.ui.makeprogress(
            topic, unit=_(b"bytes"), total=total
        ) as progress:
            progress.update(0)
            processed = 0
            blobs = 0
            for _one, oid in oids:
                processed += sizes[oid]
                blobs += 1
                progress.update(processed)
                self.ui.note(_(b'lfs: processed: %s\n') % oid)

        if blobs > 0:
            if action == b'upload':
                self.ui.status(
                    _(b'lfs: uploaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )
            elif action == b'download':
                self.ui.status(
                    _(b'lfs: downloaded %d files (%s)\n')
                    % (blobs, util.bytecount(processed))
                )

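    # Configuration knobs read by this class (illustrative hgrc sketch; the
    # endpoint URL is hypothetical):
    #
    #   [lfs]
    #   url = https://lfs.example.org/repo.git/info/lfs
    #   retry = 5
    #
    #   [experimental]
    #   lfs.worker-enable = True
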
    def __del__(self):
        # copied from mercurial/httppeer.py
        urlopener = getattr(self, 'urlopener', None)
        if urlopener:
            for h in urlopener.handlers:
                h.close()
                getattr(h, "close_all", lambda: None)()


class _dummyremote:
    """Dummy store storing blobs to temp directory."""

    def __init__(self, repo, url):
        fullpath = repo.vfs.join(b'lfs', url.path)
        self.vfs = lfsvfs(fullpath)

    def writebatch(self, pointers, fromstore):
        for p in _deduplicate(pointers):
            content = fromstore.read(p.oid(), verify=True)
            with self.vfs(p.oid(), b'wb', atomictemp=True) as fp:
                fp.write(content)

    def readbatch(self, pointers, tostore):
        for p in _deduplicate(pointers):
            with self.vfs(p.oid(), b'rb') as fp:
                tostore.download(p.oid(), fp, None)


class _nullremote:
    """Null store storing blobs to /dev/null."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore):
        pass

    def readbatch(self, pointers, tostore):
        pass


class _promptremote:
    """Prompt user to set lfs.url when accessed."""

    def __init__(self, repo, url):
        pass

    def writebatch(self, pointers, fromstore, ui=None):
        self._prompt()

    def readbatch(self, pointers, tostore, ui=None):
        self._prompt()

    def _prompt(self):
        raise error.Abort(_(b'lfs.url needs to be configured'))


_storemap = {
    b'https': _gitlfsremote,
    b'http': _gitlfsremote,
    b'file': _dummyremote,
    b'null': _nullremote,
    None: _promptremote,
}


def _deduplicate(pointers):
    """Remove any duplicate oids that exist in the list"""
    reduced = util.sortdict()
    for p in pointers:
        reduced[p.oid()] = p
    return reduced.values()


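# Illustrative behavior of _deduplicate() above: one pointer survives per oid,
# in first-seen order (util.sortdict keeps the original position when a key is
# re-assigned). `p1` and `p2` are hypothetical pointer objects with distinct
# oids.
#
#   _deduplicate([p1, p2, p1])   # -> values [p1, p2]

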
def _verify(oid, content):
    realoid = hex(hashlib.sha256(content).digest())
    if realoid != oid:
        raise LfsCorruptionError(
            _(b'detected corrupt lfs object: %s') % oid,
            hint=_(b'run hg verify'),
        )


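# Illustrative check mirroring _verify() above: an lfs oid is the sha256 hex
# digest of the blob content. The content below is hypothetical.
#
#   import hashlib
#   content = b'hello\n'
#   oid = hashlib.sha256(content).hexdigest()   # must match the stored oid

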
def remote(repo, remote=None):
    """remotestore factory. return a store in _storemap depending on config

    If ``lfs.url`` is specified, use that remote endpoint. Otherwise, try to
    infer the endpoint, based on the remote repository using the same path
    adjustments as git. As an extension, 'http' is supported as well so that
    ``hg serve`` works out of the box.

    https://github.com/git-lfs/git-lfs/blob/master/docs/api/server-discovery.md
    """
    lfsurl = repo.ui.config(b'lfs', b'url')
    url = urlutil.url(lfsurl or b'')
    if lfsurl is None:
        if remote:
            path = remote
        elif hasattr(repo, '_subtoppath'):
            # The pull command sets this during the optional update phase, which
            # tells exactly where the pull originated, whether 'paths.default'
            # or explicit.
            path = repo._subtoppath
        else:
            # TODO: investigate 'paths.remote:lfsurl' style path customization,
            # and fall back to inferring from 'paths.remote' if unspecified.
            path = repo.ui.config(b'paths', b'default') or b''

        defaulturl = urlutil.url(path)

        # TODO: support local paths as well.
        # TODO: consider the ssh -> https transformation that git applies
        if defaulturl.scheme in (b'http', b'https'):
            # ensure a trailing slash before appending the lfs suffix
            if defaulturl.path and defaulturl.path[-1:] != b'/':
                defaulturl.path += b'/'
            defaulturl.path = (defaulturl.path or b'') + b'.git/info/lfs'

            url = urlutil.url(bytes(defaulturl))
            repo.ui.note(_(b'lfs: assuming remote store: %s\n') % url)

    scheme = url.scheme
    if scheme not in _storemap:
        raise error.Abort(_(b'lfs: unknown url scheme: %s') % scheme)
    return _storemap[scheme](repo, url)


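# Illustrative endpoint inference performed by remote() above when ``lfs.url``
# is unset and 'paths.default' is an HTTP(S) URL (hypothetical host):
#
#   paths.default = https://hg.example.org/repo
#   inferred endpoint -> https://hg.example.org/repo/.git/info/lfs

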
class LfsRemoteError(error.StorageError):
    pass


class LfsCorruptionError(error.Abort):
    """Raised when a corrupt blob is detected, aborting an operation

    It exists to allow specialized handling on the server side."""
@@ -1,544 +1,540 @@
# wrapper.py - methods wrapping core mercurial logic
#
# Copyright 2017 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.


import hashlib

from mercurial.i18n import _
from mercurial.node import bin, hex, short
-from mercurial.pycompat import (
-    getattr,
-    setattr,
-)

from mercurial import (
    bundle2,
    changegroup,
    cmdutil,
    context,
    error,
    exchange,
    exthelper,
    localrepo,
    revlog,
    scmutil,
    vfs as vfsmod,
    wireprotov1server,
)

from mercurial.upgrade_utils import (
    actions as upgrade_actions,
    engine as upgrade_engine,
)

from mercurial.interfaces import repository

from mercurial.utils import (
    storageutil,
    stringutil,
)

from ..largefiles import lfutil

from . import (
    blobstore,
    pointer,
)

eh = exthelper.exthelper()


@eh.wrapfunction(localrepo, 'makefilestorage')
def localrepomakefilestorage(orig, requirements, features, **kwargs):
    if b'lfs' in requirements:
        features.add(repository.REPO_FEATURE_LFS)

    return orig(requirements=requirements, features=features, **kwargs)


@eh.wrapfunction(changegroup, 'allsupportedversions')
def allsupportedversions(orig, ui):
    versions = orig(ui)
    versions.add(b'03')
    return versions


@eh.wrapfunction(wireprotov1server, '_capabilities')
def _capabilities(orig, repo, proto):
    '''Wrap server command to announce lfs server capability'''
    caps = orig(repo, proto)
    if hasattr(repo.svfs, 'lfslocalblobstore'):
        # Advertise a slightly different capability when lfs is *required*, so
        # that the client knows it MUST load the extension. If lfs is not
        # required on the server, there's no reason to autoload the extension
        # on the client.
        if b'lfs' in repo.requirements:
            caps.append(b'lfs-serve')

        caps.append(b'lfs')
    return caps


def bypasscheckhash(self, text):
    return False


def readfromstore(self, text):
    """Read filelog content from local blobstore transform for flagprocessor.

    Default transform for flagprocessor, returning contents from blobstore.
    Returns a 2-tuple (text, validatehash) where validatehash is True as the
    contents of the blobstore should be checked using checkhash.
    """
    p = pointer.deserialize(text)
    oid = p.oid()
    store = self.opener.lfslocalblobstore
    if not store.has(oid):
        p.filename = self.filename
        self.opener.lfsremoteblobstore.readbatch([p], store)

    # The caller will validate the content
    text = store.read(oid, verify=False)

    # pack hg filelog metadata
    hgmeta = {}
    for k in p.keys():
        if k.startswith(b'x-hg-'):
            name = k[len(b'x-hg-') :]
            hgmeta[name] = p[k]
    if hgmeta or text.startswith(b'\1\n'):
        text = storageutil.packmeta(hgmeta, text)

    return (text, True)


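# Illustrative round-trip performed by readfromstore() above: every
# 'x-hg-<name>' pointer key is unpacked into filelog metadata '<name>'
# (hypothetical values):
#
#   pointer keys:  {b'x-hg-copy': b'a.txt', b'x-hg-copyrev': b'<40 hex>'}
#   filelog meta:  {b'copy': b'a.txt', b'copyrev': b'<40 hex>'}

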
def writetostore(self, text):
    # hg filelog metadata (includes rename, etc)
    hgmeta, offset = storageutil.parsemeta(text)
    if offset and offset > 0:
        # lfs blob does not contain hg filelog metadata
        text = text[offset:]

    # git-lfs only supports sha256
    oid = hex(hashlib.sha256(text).digest())
    self.opener.lfslocalblobstore.write(oid, text)

    # replace contents with metadata
    longoid = b'sha256:%s' % oid
    metadata = pointer.gitlfspointer(oid=longoid, size=b'%d' % len(text))

    # by default, we expect the content to be binary. however, LFS could also
    # be used for non-binary content. add a special entry for non-binary data.
    # this will be used by filectx.isbinary().
    if not stringutil.binary(text):
        # not hg filelog metadata (affecting commit hash), no "x-hg-" prefix
        metadata[b'x-is-binary'] = b'0'

    # translate hg filelog metadata to lfs metadata with "x-hg-" prefix
    if hgmeta is not None:
        for k, v in hgmeta.items():
            metadata[b'x-hg-%s' % k] = v

    rawtext = metadata.serialize()
    return (rawtext, False)


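# Illustrative pointer file as serialized by writetostore() above; the
# 'version' line comes from the git-lfs pointer spec, other values are
# hypothetical:
#
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:31cf...
#   size 6
#   x-is-binary 0

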
def _islfs(rlog, node=None, rev=None):
    if rev is None:
        if node is None:
            # both None - likely working copy content where node is not ready
            return False
        rev = rlog.rev(node)
    else:
        node = rlog.node(rev)
    if node == rlog.nullid:
        return False
    flags = rlog.flags(rev)
    return bool(flags & revlog.REVIDX_EXTSTORED)


# Wrapping may also be applied by remotefilelog
def filelogaddrevision(
    orig,
    self,
    text,
    transaction,
    link,
    p1,
    p2,
    cachedelta=None,
    node=None,
    flags=revlog.REVIDX_DEFAULT_FLAGS,
    **kwds
):
    # The matcher isn't available if reposetup() wasn't called.
    lfstrack = self._revlog.opener.options.get(b'lfstrack')

    if lfstrack:
        textlen = len(text)
        # exclude hg rename meta from file size
        meta, offset = storageutil.parsemeta(text)
        if offset:
            textlen -= offset

        if lfstrack(self._revlog.filename, textlen):
            flags |= revlog.REVIDX_EXTSTORED

    return orig(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=cachedelta,
        node=node,
        flags=flags,
        **kwds
    )


# Wrapping may also be applied by remotefilelog
def filelogrenamed(orig, self, node):
    if _islfs(self._revlog, node):
        rawtext = self._revlog.rawdata(node)
        if not rawtext:
            return False
        metadata = pointer.deserialize(rawtext)
        if b'x-hg-copy' in metadata and b'x-hg-copyrev' in metadata:
            return metadata[b'x-hg-copy'], bin(metadata[b'x-hg-copyrev'])
        else:
            return False
    return orig(self, node)


# Wrapping may also be applied by remotefilelog
def filelogsize(orig, self, rev):
    if _islfs(self._revlog, rev=rev):
        # fast path: use lfs metadata to answer size
        rawtext = self._revlog.rawdata(rev)
        metadata = pointer.deserialize(rawtext)
        return int(metadata[b'size'])
    return orig(self, rev)


@eh.wrapfunction(revlog, '_verify_revision')
def _verify_revision(orig, rl, skipflags, state, node):
    if _islfs(rl, node=node):
        rawtext = rl.rawdata(node)
        metadata = pointer.deserialize(rawtext)

        # Don't skip blobs that are stored locally, as local verification is
        # relatively cheap and there's no other way to verify the raw data in
        # the revlog.
        if rl.opener.lfslocalblobstore.has(metadata.oid()):
            skipflags &= ~revlog.REVIDX_EXTSTORED
        elif skipflags & revlog.REVIDX_EXTSTORED:
            # The wrapped method will set `skipread`, but there's enough local
            # info to check renames.
            state[b'safe_renamed'].add(node)

    orig(rl, skipflags, state, node)


@eh.wrapfunction(context.basefilectx, 'cmp')
def filectxcmp(orig, self, fctx):
    """returns True if text is different than fctx"""
    # some fctx (e.g. hg-git) are not based on basefilectx and do not have islfs
    if self.islfs() and getattr(fctx, 'islfs', lambda: False)():
        # fast path: check LFS oid
        p1 = pointer.deserialize(self.rawdata())
        p2 = pointer.deserialize(fctx.rawdata())
        return p1.oid() != p2.oid()
    return orig(self, fctx)


260 @eh.wrapfunction(context.basefilectx, 'isbinary')
256 @eh.wrapfunction(context.basefilectx, 'isbinary')
261 def filectxisbinary(orig, self):
257 def filectxisbinary(orig, self):
262 if self.islfs():
258 if self.islfs():
263 # fast path: use lfs metadata to answer isbinary
259 # fast path: use lfs metadata to answer isbinary
264 metadata = pointer.deserialize(self.rawdata())
260 metadata = pointer.deserialize(self.rawdata())
265 # if lfs metadata says nothing, assume it's binary by default
261 # if lfs metadata says nothing, assume it's binary by default
266 return bool(int(metadata.get(b'x-is-binary', 1)))
262 return bool(int(metadata.get(b'x-is-binary', 1)))
267 return orig(self)
263 return orig(self)
268
264
269
265
270 def filectxislfs(self):
266 def filectxislfs(self):
271 return _islfs(self.filelog()._revlog, self.filenode())
267 return _islfs(self.filelog()._revlog, self.filenode())
272
268
273
269
274 @eh.wrapfunction(cmdutil, '_updatecatformatter')
270 @eh.wrapfunction(cmdutil, '_updatecatformatter')
275 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
271 def _updatecatformatter(orig, fm, ctx, matcher, path, decode):
276 orig(fm, ctx, matcher, path, decode)
272 orig(fm, ctx, matcher, path, decode)
277 fm.data(rawdata=ctx[path].rawdata())
273 fm.data(rawdata=ctx[path].rawdata())
278
274
279
275
280 @eh.wrapfunction(scmutil, 'wrapconvertsink')
276 @eh.wrapfunction(scmutil, 'wrapconvertsink')
281 def convertsink(orig, sink):
277 def convertsink(orig, sink):
282 sink = orig(sink)
278 sink = orig(sink)
283 if sink.repotype == b'hg':
279 if sink.repotype == b'hg':
284
280
285 class lfssink(sink.__class__):
281 class lfssink(sink.__class__):
286 def putcommit(
282 def putcommit(
287 self,
283 self,
288 files,
284 files,
289 copies,
285 copies,
290 parents,
286 parents,
291 commit,
287 commit,
292 source,
288 source,
293 revmap,
289 revmap,
294 full,
290 full,
295 cleanp2,
291 cleanp2,
296 ):
292 ):
297 pc = super(lfssink, self).putcommit
293 pc = super(lfssink, self).putcommit
298 node = pc(
294 node = pc(
299 files,
295 files,
300 copies,
296 copies,
301 parents,
297 parents,
302 commit,
298 commit,
303 source,
299 source,
304 revmap,
300 revmap,
305 full,
301 full,
306 cleanp2,
302 cleanp2,
307 )
303 )
308
304
309 if b'lfs' not in self.repo.requirements:
305 if b'lfs' not in self.repo.requirements:
310 ctx = self.repo[node]
306 ctx = self.repo[node]
311
307
312 # The file list may contain removed files, so check for
308 # The file list may contain removed files, so check for
313 # membership before assuming it is in the context.
309 # membership before assuming it is in the context.
314 if any(f in ctx and ctx[f].islfs() for f, n in files):
310 if any(f in ctx and ctx[f].islfs() for f, n in files):
315 self.repo.requirements.add(b'lfs')
311 self.repo.requirements.add(b'lfs')
316 scmutil.writereporequirements(self.repo)
312 scmutil.writereporequirements(self.repo)
317
313
318 return node
314 return node
319
315
320 sink.__class__ = lfssink
316 sink.__class__ = lfssink
321
317
322 return sink
318 return sink
323
319
324
320
325 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
321 # bundlerepo uses "vfsmod.readonlyvfs(othervfs)", we need to make sure lfs
326 # options and blob stores are passed from othervfs to the new readonlyvfs.
322 # options and blob stores are passed from othervfs to the new readonlyvfs.
327 @eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
323 @eh.wrapfunction(vfsmod.readonlyvfs, '__init__')
328 def vfsinit(orig, self, othervfs):
324 def vfsinit(orig, self, othervfs):
329 orig(self, othervfs)
325 orig(self, othervfs)
330 # copy lfs related options
326 # copy lfs related options
331 for k, v in othervfs.options.items():
327 for k, v in othervfs.options.items():
332 if k.startswith(b'lfs'):
328 if k.startswith(b'lfs'):
333 self.options[k] = v
329 self.options[k] = v
334 # also copy lfs blobstores. note: this can run before reposetup, so lfs
330 # also copy lfs blobstores. note: this can run before reposetup, so lfs
335 # blobstore attributes are not always ready at this time.
331 # blobstore attributes are not always ready at this time.
336 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
332 for name in ['lfslocalblobstore', 'lfsremoteblobstore']:
337 if hasattr(othervfs, name):
333 if hasattr(othervfs, name):
338 setattr(self, name, getattr(othervfs, name))
334 setattr(self, name, getattr(othervfs, name))
339
335
340
336
341 def _prefetchfiles(repo, revmatches):
337 def _prefetchfiles(repo, revmatches):
342 """Ensure that required LFS blobs are present, fetching them as a group if
338 """Ensure that required LFS blobs are present, fetching them as a group if
343 needed."""
339 needed."""
344 if not hasattr(repo.svfs, 'lfslocalblobstore'):
340 if not hasattr(repo.svfs, 'lfslocalblobstore'):
345 return
341 return
346
342
347 pointers = []
343 pointers = []
348 oids = set()
344 oids = set()
349 localstore = repo.svfs.lfslocalblobstore
345 localstore = repo.svfs.lfslocalblobstore
350
346
351 for rev, match in revmatches:
347 for rev, match in revmatches:
352 ctx = repo[rev]
348 ctx = repo[rev]
353 for f in ctx.walk(match):
349 for f in ctx.walk(match):
354 p = pointerfromctx(ctx, f)
350 p = pointerfromctx(ctx, f)
355 if p and p.oid() not in oids and not localstore.has(p.oid()):
351 if p and p.oid() not in oids and not localstore.has(p.oid()):
356 p.filename = f
352 p.filename = f
357 pointers.append(p)
353 pointers.append(p)
358 oids.add(p.oid())
354 oids.add(p.oid())
359
355
360 if pointers:
356 if pointers:
361 # Recalculating the repo store here allows 'paths.default' that is set
357 # Recalculating the repo store here allows 'paths.default' that is set
362 # on the repo by a clone command to be used for the update.
358 # on the repo by a clone command to be used for the update.
363 blobstore.remote(repo).readbatch(pointers, localstore)
359 blobstore.remote(repo).readbatch(pointers, localstore)
364
360
365
361
366 def _canskipupload(repo):
362 def _canskipupload(repo):
367 # Skip if this hasn't been passed to reposetup()
363 # Skip if this hasn't been passed to reposetup()
368 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
364 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
369 return True
365 return True
370
366
371 # if remotestore is a null store, upload is a no-op and can be skipped
367 # if remotestore is a null store, upload is a no-op and can be skipped
372 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
368 return isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
373
369
374
370
375 def candownload(repo):
371 def candownload(repo):
376 # Skip if this hasn't been passed to reposetup()
372 # Skip if this hasn't been passed to reposetup()
377 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
373 if not hasattr(repo.svfs, 'lfsremoteblobstore'):
378 return False
374 return False
379
375
380 # if remotestore is a null store, downloads will lead to nothing
376 # if remotestore is a null store, downloads will lead to nothing
381 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)
377 return not isinstance(repo.svfs.lfsremoteblobstore, blobstore._nullremote)


def uploadblobsfromrevs(repo, revs):
    """upload lfs blobs introduced by revs"""
    if _canskipupload(repo):
        return
    pointers = extractpointers(repo, revs)
    uploadblobs(repo, pointers)


def prepush(pushop):
    """Prepush hook.

    Read through the revisions to push, looking for filelog entries that can be
    deserialized into metadata so that we can block the push on their upload to
    the remote blobstore.
    """
    return uploadblobsfromrevs(pushop.repo, pushop.outgoing.missing)


@eh.wrapfunction(exchange, 'push')
def push(orig, repo, remote, *args, **kwargs):
    """bail on push if the extension isn't enabled on remote when needed, and
    update the remote store based on the destination path."""
    if b'lfs' in repo.requirements:
        # If the remote peer is for a local repo, the requirement tests in the
        # base class method enforce lfs support. Otherwise, some revisions in
        # this repo use lfs, and the remote repo needs the extension loaded.
        if not remote.local() and not remote.capable(b'lfs'):
            # This is a copy of the message in exchange.push() when requirements
            # are missing between local repos.
            m = _(b"required features are not supported in the destination: %s")
            raise error.Abort(
                m % b'lfs', hint=_(b'enable the lfs extension on the server')
            )

        # Repositories where this extension is disabled won't have the field.
        # But if there's a requirement, then the extension must be loaded AND
        # there may be blobs to push.
        remotestore = repo.svfs.lfsremoteblobstore
        try:
            repo.svfs.lfsremoteblobstore = blobstore.remote(repo, remote.url())
            return orig(repo, remote, *args, **kwargs)
        finally:
            repo.svfs.lfsremoteblobstore = remotestore
    else:
        return orig(repo, remote, *args, **kwargs)


# when writing a bundle via "hg bundle" command, upload related LFS blobs
@eh.wrapfunction(bundle2, 'writenewbundle')
def writenewbundle(
    orig, ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
):
    """upload LFS blobs added by outgoing revisions on 'hg bundle'"""
    uploadblobsfromrevs(repo, outgoing.missing)
    return orig(
        ui, repo, source, filename, bundletype, outgoing, *args, **kwargs
    )


def extractpointers(repo, revs):
    """return a list of lfs pointers added by given revs"""
    repo.ui.debug(b'lfs: computing set of blobs to upload\n')
    pointers = {}

    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'lfs search'), _(b'changesets'), len(revs)
    ) as progress:
        for r in revs:
            ctx = repo[r]
            for p in pointersfromctx(ctx).values():
                pointers[p.oid()] = p
            progress.increment()
        return sorted(pointers.values(), key=lambda p: p.oid())


def pointerfromctx(ctx, f, removed=False):
    """return a pointer for the named file from the given changectx, or None if
    the file isn't LFS.

    Optionally, the pointer for a file deleted from the context can be returned.
    Since no such pointer is actually stored, and to distinguish from a non LFS
    file, this pointer is represented by an empty dict.
    """
    _ctx = ctx
    if f not in ctx:
        if not removed:
            return None
        if f in ctx.p1():
            _ctx = ctx.p1()
        elif f in ctx.p2():
            _ctx = ctx.p2()
        else:
            return None
    fctx = _ctx[f]
    if not _islfs(fctx.filelog()._revlog, fctx.filenode()):
        return None
    try:
        p = pointer.deserialize(fctx.rawdata())
        if ctx == _ctx:
            return p
        return {}
    except pointer.InvalidPointer as ex:
        raise error.Abort(
            _(b'lfs: corrupted pointer (%s@%s): %s\n')
            % (f, short(_ctx.node()), ex)
        )
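
# Illustrative sketch, not part of the extension: the raw filelog data that
# pointer.deserialize() parses above is a Git-LFS style pointer file, e.g.
#
#     version https://git-lfs.github.com/spec/v1
#     oid sha256:4d7a214614ab2935c943f9e0ff69d22eadbb8f32b1258daaa5e2ca24d17e239
#     size 12345
#
# (field names per the Git LFS specification; the oid and size values here
# are made up for illustration).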
def pointersfromctx(ctx, removed=False):
    """return a dict {path: pointer} for given single changectx.

    If ``removed`` == True and the LFS file was removed from ``ctx``, the value
    stored for the path is an empty dict.
    """
    result = {}
    m = ctx.repo().narrowmatch()

    # TODO: consider manifest.fastread() instead
    for f in ctx.files():
        if not m(f):
            continue
        p = pointerfromctx(ctx, f, removed=removed)
        if p is not None:
            result[f] = p
    return result


def uploadblobs(repo, pointers):
    """upload given pointers from local blobstore"""
    if not pointers:
        return

    remoteblob = repo.svfs.lfsremoteblobstore
    remoteblob.writebatch(pointers, repo.svfs.lfslocalblobstore)


@eh.wrapfunction(upgrade_engine, 'finishdatamigration')
def upgradefinishdatamigration(orig, ui, srcrepo, dstrepo, requirements):
    orig(ui, srcrepo, dstrepo, requirements)

    # Skip if this hasn't been passed to reposetup()
    if hasattr(srcrepo.svfs, 'lfslocalblobstore') and hasattr(
        dstrepo.svfs, 'lfslocalblobstore'
    ):
        srclfsvfs = srcrepo.svfs.lfslocalblobstore.vfs
        dstlfsvfs = dstrepo.svfs.lfslocalblobstore.vfs

        for dirpath, dirs, files in srclfsvfs.walk():
            for oid in files:
                ui.write(_(b'copying lfs blob %s\n') % oid)
                lfutil.link(srclfsvfs.join(oid), dstlfsvfs.join(oid))


@eh.wrapfunction(upgrade_actions, 'preservedrequirements')
@eh.wrapfunction(upgrade_actions, 'supporteddestrequirements')
def upgraderequirements(orig, repo):
    reqs = orig(repo)
    if b'lfs' in repo.requirements:
        reqs.add(b'lfs')
    return reqs

@@ -1,4303 +1,4301 @@
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help COMMAND` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
files creations or deletions. This behavior can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

It may be desirable for mq changesets to be kept in the secret phase (see
:hg:`help phases`), which can be enabled with the following setting::

  [mq]
  secret = True

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.

If the working directory contains uncommitted files, qpush, qpop and
qgoto abort immediately. If -f/--force is used, the changes are
discarded. Setting::

  [mq]
  keepchanges = True

makes them behave as if --keep-changes were passed, and non-conflicting
local changes will be tolerated and preserved. If incompatible options
such as -f/--force or --exact are passed, this setting is ignored.

This extension used to provide a strip command. This command now lives
in the strip extension.
'''


import os
import re
import shutil
import sys
from mercurial.i18n import _
from mercurial.node import (
    bin,
    hex,
    nullrev,
    short,
)
from mercurial.pycompat import (
-    delattr,
-    getattr,
    open,
)
from mercurial import (
    cmdutil,
    commands,
    encoding,
    error,
    extensions,
    hg,
    localrepo,
    lock as lockmod,
    logcmdutil,
    patch as patchmod,
    phases,
    pycompat,
    registrar,
    revsetlang,
    scmutil,
    smartset,
    strip,
    subrepoutil,
    util,
    vfs as vfsmod,
)
from mercurial.utils import (
    dateutil,
    stringutil,
    urlutil,
)

release = lockmod.release
seriesopts = [(b's', b'summary', None, _(b'print first line of patch header'))]

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

configtable = {}
configitem = registrar.configitem(configtable)

configitem(
    b'mq',
    b'git',
    default=b'auto',
)
configitem(
    b'mq',
    b'keepchanges',
    default=False,
)
configitem(
    b'mq',
    b'plain',
    default=False,
)
configitem(
    b'mq',
    b'secret',
    default=False,
)

# force load strip extension formerly included in mq and import some utility
try:
    extensions.find(b'strip')
except KeyError:
    # note: load is lazy so we could avoid the try-except,
    # but I (marmoute) prefer this explicit code.
    class dummyui:
        def debug(self, msg):
            pass

        def log(self, event, msgfmt, *msgargs, **opts):
            pass

    extensions.load(dummyui(), b'strip', b'')

strip = strip.strip


def checksubstate(repo, baserev=None):
    """return list of subrepos at a different revision than substate.
    Abort if any subrepos have uncommitted changes."""
    inclsubs = []
    wctx = repo[None]
    if baserev:
        bctx = repo[baserev]
    else:
        bctx = wctx.p1()
    for s in sorted(wctx.substate):
        wctx.sub(s).bailifchanged(True)
        if s not in bctx.substate or bctx.sub(s).dirty():
            inclsubs.append(s)
    return inclsubs


# Patch names look like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

class statusentry:
    def __init__(self, node, name):
        self.node, self.name = node, name

    def __bytes__(self):
        return hex(self.node) + b':' + self.name

    __str__ = encoding.strmethod(__bytes__)
    __repr__ = encoding.strmethod(__bytes__)
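
# Illustrative sketch, not part of mq.py: one statusentry per line is
# round-tripped through .hg/patches/status, so a typical file holds lines
# of the form
#
#     45b8f8dd44dac94aa6a107cdfbe69f7aee6e5b08:fix-bug.patch
#
# (node hash and patch name here are hypothetical); the 'applied'
# propertycache further down parses such lines back into statusentry objects.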


# The order of the headers in 'hg export' HG patches:
HGHEADERS = [
    # '# HG changeset patch',
    b'# User ',
    b'# Date ',
    b'# ',
    b'# Branch ',
    b'# Node ID ',
    b'# Parent ',  # can occur twice for merges - but that is not relevant for mq
]
# The order of headers in plain 'mail style' patches:
PLAINHEADERS = {
    b'from': 0,
    b'date': 1,
    b'subject': 2,
}


def inserthgheader(lines, header, value):
    """Assuming lines contains an HG patch header, add a header line with value.
    >>> try: inserthgheader([], b'# Date ', b'z')
    ... except ValueError as inst: print("oops")
    oops
    >>> inserthgheader([b'# HG changeset patch'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b''], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '']
    >>> inserthgheader([b'# HG changeset patch', b'# User y'], b'# Date ', b'z')
    ['# HG changeset patch', '# User y', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date x', b'# User y'],
    ...                b'# User ', b'z')
    ['# HG changeset patch', '# Date x', '# User z']
    >>> inserthgheader([b'# HG changeset patch', b'# Date y'], b'# Date ', b'z')
    ['# HG changeset patch', '# Date z']
    >>> inserthgheader([b'# HG changeset patch', b'', b'# Date y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '', '# Date y']
    >>> inserthgheader([b'# HG changeset patch', b'# Parent y'],
    ...                b'# Date ', b'z')
    ['# HG changeset patch', '# Date z', '# Parent y']
    """
    start = lines.index(b'# HG changeset patch') + 1
    newindex = HGHEADERS.index(header)
    bestpos = len(lines)
    for i in range(start, len(lines)):
        line = lines[i]
        if not line.startswith(b'# '):
            bestpos = min(bestpos, i)
            break
        for lineindex, h in enumerate(HGHEADERS):
            if line.startswith(h):
                if lineindex == newindex:
                    lines[i] = header + value
                    return lines
                if lineindex > newindex:
                    bestpos = min(bestpos, i)
                break  # next line
    lines.insert(bestpos, header + value)
    return lines


def insertplainheader(lines, header, value):
    """For lines containing a plain patch header, add a header line with value.
    >>> insertplainheader([], b'Date', b'z')
    ['Date: z']
    >>> insertplainheader([b''], b'Date', b'z')
    ['Date: z', '']
    >>> insertplainheader([b'x'], b'Date', b'z')
    ['Date: z', '', 'x']
    >>> insertplainheader([b'From: y', b'x'], b'Date', b'z')
    ['From: y', 'Date: z', '', 'x']
    >>> insertplainheader([b' date : x', b' from : y', b''], b'From', b'z')
    [' date : x', 'From: z', '']
    >>> insertplainheader([b'', b'Date: y'], b'Date', b'z')
    ['Date: z', '', 'Date: y']
    >>> insertplainheader([b'foo: bar', b'DATE: z', b'x'], b'From', b'y')
    ['From: y', 'foo: bar', 'DATE: z', '', 'x']
    """
    newprio = PLAINHEADERS[header.lower()]
    bestpos = len(lines)
    for i, line in enumerate(lines):
        if b':' in line:
            lheader = line.split(b':', 1)[0].strip().lower()
            lprio = PLAINHEADERS.get(lheader, newprio + 1)
            if lprio == newprio:
                lines[i] = b'%s: %s' % (header, value)
                return lines
            if lprio > newprio and i < bestpos:
                bestpos = i
        else:
            if line:
                lines.insert(i, b'')
            if i < bestpos:
                bestpos = i
            break
    lines.insert(bestpos, b'%s: %s' % (header, value))
    return lines


class patchheader:
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (
                    l.startswith(b"diff -")
                    or l.startswith(b"Index:")
                    or l.startswith(b"===========")
                ):
                    del lines[-1]
                else:
                    break

        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

        for line in open(pf, b'rb'):
            line = line.rstrip()
            if line.startswith(b'diff --git') or (
                diffstart and line.startswith(b'+++ ')
            ):
                diffstart = 2
                break
            diffstart = 0  # reset
            if line.startswith(b"--- "):
                diffstart = 1
                continue
            elif format == b"hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith(b"# User "):
                    user = line[7:]
                elif line.startswith(b"# Date "):
                    date = line[7:]
                elif line.startswith(b"# Parent "):
                    parent = line[9:].lstrip()  # handle double trailing space
                elif line.startswith(b"# Branch "):
                    branch = line[9:]
                elif line.startswith(b"# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith(b"# ") and line:
                    message.append(line)
                    format = None
            elif line == b'# HG changeset patch':
                message = []
                format = b"hgpatch"
            elif format != b"tagdone" and (
                line.startswith(b"Subject: ") or line.startswith(b"subject: ")
            ):
                subject = line[9:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"From: ") or line.startswith(b"from: ")
            ):
                user = line[6:]
                format = b"tag"
            elif format != b"tagdone" and (
                line.startswith(b"Date: ") or line.startswith(b"date: ")
            ):
                date = line[6:]
                format = b"tag"
            elif format == b"tag" and line == b"":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = b"tagdone"
            elif message or line:
                message.append(line)
                comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith(b"tag") and subject:
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = (
            plainmode
            or b'# HG changeset patch' not in self.comments
            and any(
                c.startswith(b'Date: ') or c.startswith(b'From: ')
                for c in self.comments
            )
        )

    def setuser(self, user):
        try:
            inserthgheader(self.comments, b'# User ', user)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'From', user)
            else:
                tmp = [b'# HG changeset patch', b'# User ' + user]
                self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        try:
            inserthgheader(self.comments, b'# Date ', date)
        except ValueError:
            if self.plainmode:
                insertplainheader(self.comments, b'Date', date)
            else:
                tmp = [b'# HG changeset patch', b'# Date ' + date]
                self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        try:
            inserthgheader(self.comments, b'# Parent ', parent)
        except ValueError:
            if not self.plainmode:
                tmp = [b'# HG changeset patch', b'# Parent ' + parent]
                self.comments = tmp + self.comments
        self.parent = parent

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        if message:
            if self.plainmode and self.comments and self.comments[-1]:
                self.comments.append(b'')
            self.comments.append(message)

    def __bytes__(self):
        s = b'\n'.join(self.comments).rstrip()
        if not s:
            return b''
        return s + b'\n\n'

    __str__ = encoding.strmethod(__bytes__)

    def _delmsg(self):
        """Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line."""
        if self.message:
            subj = b'subject: ' + self.message[0].lower()
            for i in range(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]


def newcommit(repo, phase, *args, **kwargs):
    """helper dedicated to ensuring a commit respects the mq.secret setting

    It should be used instead of repo.commit inside the mq source for
    operations creating new changesets.
    """
    repo = repo.unfiltered()
    if phase is None:
        if repo.ui.configbool(b'mq', b'secret'):
            phase = phases.secret
    overrides = {(b'ui', b'allowemptycommit'): True}
    if phase is not None:
        overrides[(b'phases', b'new-commit')] = phase
    with repo.ui.configoverride(overrides, b'mq'):
        repo.ui.setconfig(b'ui', b'allowemptycommit', True)
        return repo.commit(*args, **kwargs)
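
# Illustrative sketch, not part of mq.py: with "[mq] secret = True" the
# phase=None path above resolves to phases.secret via the 'phases.new-commit'
# override, so a caller would see something like (hypothetical repo object):
#
#     node = newcommit(repo, None, b'message', force=True)
#     assert repo[node].phase() == phases.secret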


class AbortNoCleanup(error.Abort):
    pass


class queue:
    def __init__(self, ui, baseui, path, patchdir=None):
        self.basepath = path
        try:
            with open(os.path.join(path, b'patches.queue'), 'rb') as fh:
                cur = fh.read().rstrip()

            if not cur:
                curpath = os.path.join(path, b'patches')
            else:
                curpath = os.path.join(path, b'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, b'patches')
        self.path = patchdir or curpath
        self.opener = vfsmod.vfs(self.path)
        self.ui = ui
        self.baseui = baseui
        self.applieddirty = False
        self.seriesdirty = False
        self.added = []
        self.seriespath = b"series"
        self.statuspath = b"status"
        self.guardspath = b"guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
        gitmode = ui.config(b'mq', b'git').lower()
        boolmode = stringutil.parsebool(gitmode)
        if boolmode is not None:
            if boolmode:
                gitmode = b'yes'
            else:
                gitmode = b'no'
        self.gitmode = gitmode
        # deprecated config: mq.plain
        self.plainmode = ui.configbool(b'mq', b'plain')
        self.checkapplied = True

    @util.propertycache
    def applied(self):
        def parselines(lines):
            for l in lines:
                entry = l.split(b':', 1)
                if len(entry) > 1:
                    n, name = entry
                    yield statusentry(bin(n), name)
                elif l.strip():
                    self.ui.warn(
                        _(b'malformatted mq status line: %s\n')
                        % stringutil.pprint(entry)
                    )
                # else we ignore empty lines

        try:
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        except FileNotFoundError:
            return []

    @util.propertycache
    def fullseries(self):
        try:
            return self.opener.read(self.seriespath).splitlines()
        except FileNotFoundError:
            return []

    @util.propertycache
    def series(self):
        self.parseseries()
        return self.series

    @util.propertycache
    def seriesguards(self):
        self.parseseries()
        return self.seriesguards

    def invalidate(self):
        for a in 'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = False
        self.seriesdirty = False
        self.guardsdirty = False
        self.activeguards = None
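
    # Illustrative sketch, not part of mq.py: util.propertycache stores the
    # computed value in the instance's __dict__, so the builtin delattr()
    # above is all that is needed to drop the cache. A minimal standalone
    # analogue (hypothetical 'expensive' helper):
    #
    #     class demo:
    #         @util.propertycache
    #         def series(self):
    #             return expensive()
    #
    #     d = demo()
    #     d.series              # computed once, cached in d.__dict__
    #     delattr(d, 'series')  # cache dropped; next access recomputes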

    def diffopts(self, opts=None, patchfn=None, plain=False):
        """Return diff options tweaked for this mq use, possibly upgrading to
        git format, and possibly plain and without lossy options."""
        diffopts = patchmod.difffeatureopts(
            self.ui,
            opts,
            git=True,
            whitespace=not plain,
            formatchanging=not plain,
        )
        if self.gitmode == b'auto':
            diffopts.upgrade = True
        elif self.gitmode == b'keep':
            pass
        elif self.gitmode in (b'yes', b'no'):
            diffopts.git = self.gitmode == b'yes'
        else:
            raise error.Abort(
                _(b'mq.git option can be auto/keep/yes/no got %s')
                % self.gitmode
            )
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts

    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == b'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, b'r')
                # if the patch was a git patch, refresh it as a git patch
                diffopts.git = any(
                    line.startswith(b'diff --git') for line in patchf
                )
                patchf.close()
        return diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def findseries(self, patch):
        def matchpatch(l):
            l = l.split(b'#', 1)[0]
            return l.strip() == patch

        for index, l in enumerate(self.fullseries):
            if matchpatch(l):
                return index
        return None

    guard_re = re.compile(br'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

    def parseseries(self):
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find(b'#')
            if h == -1:
                patch = l
                comment = b''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise error.Abort(
                        _(b'%s appears more than once in %s')
                        % (patch, self.join(self.seriespath))
                    )
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))
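
    # Illustrative sketch, not part of mq.py: a series file line such as
    #
    #     bugfix.patch #+stable #-experimental
    #
    # parses to the patch b'bugfix.patch' guarded by [b'+stable',
    # b'-experimental'], since
    #
    #     >>> queue.guard_re.findall(b' #+stable #-experimental')
    #     [b'+stable', b'-experimental']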

    def checkguard(self, guard):
        if not guard:
            return _(b'guard cannot be an empty string')
        bad_chars = b'# \t\r\n\f'
        first = guard[0]
        if first in b'-+':
            return _(b'guard %r starts with invalid character: %r') % (
                guard,
                first,
            )
        for c in bad_chars:
            if c in guard:
                return _(b'invalid character in guard %r: %r') % (guard, c)

    def setactive(self, guards):
        for guard in guards:
            bad = self.checkguard(guard)
            if bad:
                raise error.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug(b'active guards: %s\n' % b' '.join(guards))
        self.activeguards = guards
        self.guardsdirty = True

    def active(self):
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except FileNotFoundError:
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn(
                        b'%s:%d: %s\n'
                        % (self.join(self.guardspath), i + 1, bad)
                    )
                else:
                    self.activeguards.append(guard)
        return self.activeguards

    def setguards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise error.Abort(_(b'guard %r too short') % g)
            if g[0] not in b'-+':
                raise error.Abort(_(b'guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise error.Abort(bad)
        drop = self.guard_re.sub(b'', self.fullseries[idx])
        self.fullseries[idx] = drop + b''.join([b' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True

    def pushable(self, idx):
        if isinstance(idx, bytes):
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        exactneg = [
            g for g in patchguards if g.startswith(b'-') and g[1:] in guards
        ]
        if exactneg:
            return False, stringutil.pprint(exactneg[0])
        pos = [g for g in patchguards if g.startswith(b'+')]
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, stringutil.pprint(exactpos[0])
            return False, b' '.join([stringutil.pprint(p) for p in pos])
        return True, b''
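
    # Illustrative sketch, not part of mq.py: with active guards [b'stable'],
    # pushable() above behaves roughly as follows for a patch guarded by:
    #
    #     []            -> pushable (no guards at all)
    #     [b'-stable']  -> blocked  (exact negative guard match)
    #     [b'+stable']  -> pushable (exact positive guard match)
    #     [b'+other']   -> blocked  (positive guards exist, none active)
    #     [b'-other']   -> pushable (negative guard does not match)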
734
732
735 def explainpushable(self, idx, all_patches=False):
733 def explainpushable(self, idx, all_patches=False):
736 if all_patches:
734 if all_patches:
737 write = self.ui.write
735 write = self.ui.write
738 else:
736 else:
739 write = self.ui.warn
737 write = self.ui.warn
740
738
741 if all_patches or self.ui.verbose:
739 if all_patches or self.ui.verbose:
742 if isinstance(idx, bytes):
740 if isinstance(idx, bytes):
743 idx = self.series.index(idx)
741 idx = self.series.index(idx)
744 pushable, why = self.pushable(idx)
742 pushable, why = self.pushable(idx)
745 if all_patches and pushable:
743 if all_patches and pushable:
746 if why is None:
744 if why is None:
747 write(
745 write(
748 _(b'allowing %s - no guards in effect\n')
746 _(b'allowing %s - no guards in effect\n')
749 % self.series[idx]
747 % self.series[idx]
750 )
748 )
751 else:
749 else:
752 if not why:
750 if not why:
753 write(
751 write(
754 _(b'allowing %s - no matching negative guards\n')
752 _(b'allowing %s - no matching negative guards\n')
755 % self.series[idx]
753 % self.series[idx]
756 )
754 )
757 else:
755 else:
758 write(
756 write(
759 _(b'allowing %s - guarded by %s\n')
757 _(b'allowing %s - guarded by %s\n')
760 % (self.series[idx], why)
758 % (self.series[idx], why)
761 )
759 )
762 if not pushable:
760 if not pushable:
763 if why:
761 if why:
764 write(
762 write(
765 _(b'skipping %s - guarded by %s\n')
763 _(b'skipping %s - guarded by %s\n')
766 % (self.series[idx], why)
764 % (self.series[idx], why)
767 )
765 )
768 else:
766 else:
769 write(
767 write(
770 _(b'skipping %s - no matching guards\n')
768 _(b'skipping %s - no matching guards\n')
771 % self.series[idx]
769 % self.series[idx]
772 )
770 )
773
771
774 def savedirty(self):
772 def savedirty(self):
775 def writelist(items, path):
773 def writelist(items, path):
776 fp = self.opener(path, b'wb')
774 fp = self.opener(path, b'wb')
777 for i in items:
775 for i in items:
778 fp.write(b"%s\n" % i)
776 fp.write(b"%s\n" % i)
779 fp.close()
777 fp.close()
780
778
781 if self.applieddirty:
779 if self.applieddirty:
782 writelist(map(bytes, self.applied), self.statuspath)
780 writelist(map(bytes, self.applied), self.statuspath)
783 self.applieddirty = False
781 self.applieddirty = False
784 if self.seriesdirty:
782 if self.seriesdirty:
785 writelist(self.fullseries, self.seriespath)
783 writelist(self.fullseries, self.seriespath)
786 self.seriesdirty = False
784 self.seriesdirty = False
787 if self.guardsdirty:
785 if self.guardsdirty:
788 writelist(self.activeguards, self.guardspath)
786 writelist(self.activeguards, self.guardspath)
789 self.guardsdirty = False
787 self.guardsdirty = False
790 if self.added:
788 if self.added:
791 qrepo = self.qrepo()
789 qrepo = self.qrepo()
792 if qrepo:
790 if qrepo:
793 with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
791 with qrepo.wlock(), qrepo.dirstate.changing_files(qrepo):
794 qrepo[None].add(
792 qrepo[None].add(
795 f for f in self.added if f not in qrepo[None]
793 f for f in self.added if f not in qrepo[None]
796 )
794 )
797 self.added = []
795 self.added = []
798
796
799 def removeundo(self, repo):
797 def removeundo(self, repo):
800 undo = repo.sjoin(b'undo')
798 undo = repo.sjoin(b'undo')
801 if not os.path.exists(undo):
799 if not os.path.exists(undo):
802 return
800 return
803 try:
801 try:
804 os.unlink(undo)
802 os.unlink(undo)
805 except OSError as inst:
803 except OSError as inst:
806 self.ui.warn(
804 self.ui.warn(
807 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
805 _(b'error removing undo: %s\n') % stringutil.forcebytestr(inst)
808 )
806 )

    def backup(self, repo, files, copy=False):
        # backup local changes in --force case
        for f in sorted(files):
            absf = repo.wjoin(f)
            if os.path.lexists(absf):
                absorig = scmutil.backuppath(self.ui, repo, f)
                self.ui.note(
                    _(b'saving current version of %s as %s\n')
                    % (f, os.path.relpath(absorig))
                )

                if copy:
                    util.copyfile(absf, absorig)
                else:
                    util.rename(absf, absorig)

    def printdiff(
        self,
        repo,
        diffopts,
        node1,
        node2=None,
        files=None,
        fp=None,
        changes=None,
        opts=None,
    ):
        if opts is None:
            opts = {}
        stat = opts.get(b'stat')
        m = scmutil.match(repo[node1], files, opts)
        logcmdutil.diffordiffstat(
            self.ui,
            repo,
            diffopts,
            repo[node1],
            repo[node2],
            m,
            changes,
            stat,
            fp,
        )

    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # first try just applying the patch
        (err, n) = self.apply(
            repo, [patch], update_status=False, strict=True, merge=rev
        )

        if err == 0:
            return (err, n)

        if n is None:
            raise error.Abort(_(b"apply failed for patch %s") % patch)

        self.ui.warn(_(b"patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        strip(self.ui, repo, [n], update=False, backup=False)

        ctx = repo[rev]
        ret = hg.merge(ctx, remind=False)
        if ret:
            raise error.Abort(_(b"update returned %d") % ret)
        n = newcommit(repo, None, ctx.description(), ctx.user(), force=True)
        if n is None:
            raise error.Abort(_(b"repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except Exception:
            raise error.Abort(_(b"unable to read %s") % patch)

        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, b"w")
        comments = bytes(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        """return the mq-handled parent or p1

        In some cases where mq ends up being the parent of a merge, the
        appropriate parent may be p2
        (e.g. an in-progress merge started with mq disabled).

        If no parent is managed by mq, p1 is returned.
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == repo.nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != repo.nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1

    def mergepatch(self, repo, mergeq, series, diffopts):
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = b".hg.patches.merge.marker"
            n = newcommit(repo, None, b'[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = True

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_(b"patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_(b"patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = True
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)

    def patch(self, repo, patchfile):
        """Apply patchfile to the working directory.
        patchfile: name of patch file"""
        files = set()
        try:
            fuzz = patchmod.patch(
                self.ui, repo, patchfile, strip=1, files=files, eolmode=None
            )
            return (True, list(files), fuzz)
        except Exception as inst:
            self.ui.note(stringutil.forcebytestr(inst) + b'\n')
            if not self.ui.verbose:
                self.ui.warn(_(b"patch failed, unable to continue (try -v)\n"))
            self.ui.traceback()
            return (False, list(files), False)

    def apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction(b"qpush")
            try:
                ret = self._apply(
                    repo,
                    series,
                    list,
                    update_status,
                    strict,
                    patchdir,
                    merge,
                    all_files=all_files,
                    tobackup=tobackup,
                    keepchanges=keepchanges,
                )
                tr.close()
                self.savedirty()
                return ret
            except AbortNoCleanup:
                tr.close()
                self.savedirty()
                raise
            except:  # re-raises
                try:
                    tr.abort()
                finally:
                    self.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)

    def _apply(
        self,
        repo,
        series,
        list=False,
        update_status=True,
        strict=False,
        patchdir=None,
        merge=None,
        all_files=None,
        tobackup=None,
        keepchanges=False,
    ):
        """returns (error, hash)

        error = 1 for unable to read, 2 for patch failed, 3 for patch
        fuzz. tobackup is None or a set of files to backup before they
        are modified by a patch.
        """
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_(b"applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_(b"unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = b"imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append(b"\nimported patch %s" % patchname)
                message = b'\n'.join(message)

            if ph.haspatch:
                if tobackup:
                    touched = patchmod.changedfiles(self.ui, repo, pf)
                    touched = set(touched) & tobackup
                    if touched and keepchanges:
                        raise AbortNoCleanup(
                            _(b"conflicting local changes found"),
                            hint=_(b"did you forget to qrefresh?"),
                        )
                    self.backup(repo, touched, copy=True)
                    tobackup = tobackup - touched
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_(b"patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                with repo.dirstate.changing_parents(repo):
                    for f in files:
                        repo.dirstate.update_file_p1(f, p1_tracked=True)
                    p1 = repo.dirstate.p1()
                    repo.setparents(p1, merge)

            if all_files and b'.hgsubstate' in all_files:
                wctx = repo[None]
                pctx = repo[b'.']
                overwrite = False
                mergedsubstate = subrepoutil.submerge(
                    repo, pctx, wctx, wctx, overwrite
                )
                files += mergedsubstate.keys()

            match = scmutil.matchfiles(repo, files or [])
            oldtip = repo.changelog.tip()
            n = newcommit(
                repo, None, message, ph.user, ph.date, match=match, force=True
            )
            if repo.changelog.tip() == oldtip:
                raise error.Abort(
                    _(b"qpush exactly duplicates child changeset")
                )
            if n is None:
                raise error.Abort(_(b"repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(
                    _(b"patch failed, rejects left in working directory\n")
                )
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_(b"fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                with r.wlock(), r.dirstate.changing_files(r):
                    r[None].forget(patches)
            for p in patches:
                try:
                    os.unlink(self.join(p))
                except FileNotFoundError:
                    pass

        qfinished = []
        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = True

        unknown = []

        sortedseries = []
        for p in patches:
            idx = self.findseries(p)
            if idx is None:
                sortedseries.append((-1, p))
            else:
                sortedseries.append((idx, p))

        sortedseries.sort(reverse=True)
        for i, p in sortedseries:
            if i != -1:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = {entry.name: entry.node for entry in qfinished}
                for p in unknown:
                    msg = _(b'revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _(b'unknown patches: %s\n')
                raise error.Abort(b''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = True
        return [entry.node for entry in qfinished]

    def _revpatches(self, repo, revs):
        firstrev = repo[self.applied[0].node].rev()
        patches = []
        for i, rev in enumerate(revs):
            if rev < firstrev:
                raise error.Abort(_(b'revision %d is not managed') % rev)

            ctx = repo[rev]
            base = self.applied[i].node
            if ctx.node() != base:
                msg = _(b'cannot delete revision %d above applied patches')
                raise error.Abort(msg % rev)

            patch = self.applied[i].name
            for fmt in (b'[mq]: %s', b'imported patch %s'):
                if ctx.description() == fmt % patch:
                    msg = _(b'patch %s finalized without changeset message\n')
                    repo.ui.status(msg % patch)
                    break

            patches.append(patch)
        return patches

    def finish(self, repo, revs):
        # Manually trigger phase computation to ensure phasedefaults is
        # executed before we remove the patches.
        repo._phasecache
        patches = self._revpatches(repo, sorted(revs))
        qfinished = self._cleanup(patches, len(patches))
        if qfinished and repo.ui.configbool(b'mq', b'secret'):
            # only use this logic when the secret option is added
            oldqbase = repo[qfinished[0]]
            tphase = phases.newcommitphase(repo.ui)
            if oldqbase.phase() > tphase and oldqbase.p1().phase() <= tphase:
                with repo.transaction(b'qfinish') as tr:
                    phases.advanceboundary(repo, tr, tphase, qfinished)

    def delete(self, repo, patches, opts):
        if not patches and not opts.get(b'rev'):
            raise error.Abort(
                _(b'qdelete requires at least one revision or patch name')
            )

        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise error.Abort(_(b"cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise error.Abort(_(b"patch %s not in series file") % patch)
            if patch not in realpatches:
                realpatches.append(patch)

        numrevs = 0
        if opts.get(b'rev'):
            if not self.applied:
                raise error.Abort(_(b'no patches applied'))
            revs = logcmdutil.revrange(repo, opts.get(b'rev'))
            revs.sort()
            revpatches = self._revpatches(repo, revs)
            realpatches += revpatches
            numrevs = len(revpatches)

        self._cleanup(realpatches, numrevs, opts.get(b'keep'))

    def checktoppatch(self, repo):
        '''check that working directory is at qtip'''
        if self.applied:
            top = self.applied[-1].node
            patch = self.applied[-1].name
            if repo.dirstate.p1() != top:
                raise error.Abort(_(b"working directory revision is not qtip"))
            return top, patch
        return None, None

    def putsubstate2changes(self, substatestate, changes):
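        # 'changes' is either a status-like object or a legacy list of
        # status lists; 'mar' below is its (modified, added, removed) triple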
        if isinstance(changes, list):
            mar = changes[:3]
        else:
            mar = (changes.modified, changes.added, changes.removed)
        if any((b'.hgsubstate' in files for files in mar)):
            return  # already listed
        # not yet listed
        if substatestate.added or not substatestate.any_tracked:
            mar[1].append(b'.hgsubstate')
        elif substatestate.removed:
            mar[2].append(b'.hgsubstate')
        else:  # modified
            mar[0].append(b'.hgsubstate')

    def checklocalchanges(self, repo, force=False, refresh=True):
        excsuffix = b''
        if refresh:
            excsuffix = b', qrefresh first'
            # plain versions for i18n tool to detect them
            _(b"local changes found, qrefresh first")
            _(b"local changed subrepos found, qrefresh first")

        s = repo.status()
        if not force:
            cmdutil.checkunfinished(repo)
            if s.modified or s.added or s.removed or s.deleted:
                _(b"local changes found")  # i18n tool detection
                raise error.Abort(_(b"local changes found" + excsuffix))
            if checksubstate(repo):
                _(b"local changed subrepos found")  # i18n tool detection
                raise error.Abort(
                    _(b"local changed subrepos found" + excsuffix)
                )
        else:
            cmdutil.checkunfinished(repo, skipmerge=True)
        return s

    _reserved = (b'series', b'status', b'guards', b'.', b'..')

    def checkreservedname(self, name):
        if name in self._reserved:
            raise error.Abort(
                _(b'"%s" cannot be used as the name of a patch') % name
            )
        if name != name.strip():
            # whitespace is stripped by parseseries()
            raise error.Abort(
                _(b'patch name cannot begin or end with whitespace')
            )
        for prefix in (b'.hg', b'.mq'):
            if name.startswith(prefix):
                raise error.Abort(
                    _(b'patch name cannot begin with "%s"') % prefix
                )
        for c in (b'#', b':', b'\r', b'\n'):
            if c in name:
                raise error.Abort(
                    _(b'%r cannot be used in the name of a patch')
                    % pycompat.bytestr(c)
                )

    def checkpatchname(self, name, force=False):
        self.checkreservedname(name)
        if not force and os.path.exists(self.join(name)):
            if os.path.isdir(self.join(name)):
                raise error.Abort(
                    _(b'"%s" already exists as a directory') % name
                )
            else:
                raise error.Abort(_(b'patch "%s" already exists') % name)

    def makepatchname(self, title, fallbackname):
        """Return a suitable filename for title, adding a suffix to make
        it unique in the existing list"""
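        # e.g. a title of b'Fix: crash on load!' becomes b'fix_crash_on_load';
        # if that name is already taken, b'fix_crash_on_load__1',
        # b'fix_crash_on_load__2', ... are tried in turn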
        namebase = re.sub(br'[\s\W_]+', b'_', title.lower()).strip(b'_')
        namebase = namebase[:75]  # avoid too long name (issue5117)
        if namebase:
            try:
                self.checkreservedname(namebase)
            except error.Abort:
                namebase = fallbackname
        else:
            namebase = fallbackname
        name = namebase
        i = 0
        while True:
            if name not in self.fullseries:
                try:
                    self.checkpatchname(name)
                    break
                except error.Abort:
                    pass
            i += 1
            name = b'%s__%d' % (namebase, i)
        return name

    def checkkeepchanges(self, keepchanges, force):
        if force and keepchanges:
            raise error.Abort(_(b'cannot use both --force and --keep-changes'))

    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        opts = pycompat.byteskwargs(opts)
        msg = opts.get(b'msg')
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qnew')
        user = opts.get(b'user')
        date = opts.get(b'date')
        if date:
            date = dateutil.parsedate(date)
        diffopts = self.diffopts({b'git': opts.get(b'git')}, plain=True)
        if opts.get(b'checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = checksubstate(repo)
        if inclsubs:
            substatestate = repo.dirstate.get_entry(b'.hgsubstate')
        if opts.get(b'include') or opts.get(b'exclude') or pats:
            # detect missing files in pats
            def badfn(f, msg):
                if f != b'.hgsubstate':  # .hgsubstate is auto-created
                    raise error.Abort(b'%s: %s' % (f, msg))

            match = scmutil.match(repo[None], pats, opts, badfn=badfn)
            changes = repo.status(match=match)
        else:
            changes = self.checklocalchanges(repo, force=True)
        commitfiles = list(inclsubs)
        commitfiles.extend(changes.modified)
        commitfiles.extend(changes.added)
        commitfiles.extend(changes.removed)
        match = scmutil.matchfiles(repo, commitfiles)
        if len(repo[None].parents()) > 1:
            raise error.Abort(_(b'cannot manage merge changesets'))
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        with repo.wlock():
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, b"w")
            except IOError as e:
                raise error.Abort(
                    _(b'cannot write patch "%s": %s')
                    % (patchfn, encoding.strtolocal(e.strerror))
                )
            try:
                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            return desc
                        else:
                            return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    commitmsg = msg
                else:
                    commitmsg = msg or defaultmsg

                n = newcommit(
                    repo,
                    None,
                    commitmsg,
                    user,
                    date,
                    match=match,
                    force=True,
                    editor=editor,
                )
                if n is None:
                    raise error.Abort(_(b"repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = True
                    self.applieddirty = True
                    nctx = repo[n]
                    ph = patchheader(self.join(patchfn), self.plainmode)
                    if user:
                        ph.setuser(user)
                    if date:
                        ph.setdate(b'%d %d' % date)
                    ph.setparent(hex(nctx.p1().node()))
                    msg = nctx.description().strip()
                    if msg == defaultmsg.strip():
                        msg = b''
                    ph.setmessage(msg)
                    p.write(bytes(ph))
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        if inclsubs:
                            self.putsubstate2changes(substatestate, changes)
                        chunks = patchmod.diff(
                            repo,
                            node1=parent,
                            node2=n,
                            changes=changes,
                            opts=diffopts,
                        )
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    r = self.qrepo()
                    if r:
                        with r.wlock(), r.dirstate.changing_files(r):
                            r[None].add([patchfn])
                except:  # re-raises
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except OSError:
                    self.ui.warn(_(b'error unlinking %s\n') % patchpath)
                raise
        self.removeundo(repo)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.node, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number (as string) to indicate an offset in the series file
    # 2) a unique substring of the patch name
    # 3) patchname[-+]num to indicate an offset in the series file
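    # For example, with series [b'a', b'b', b'c'] and no exact match,
    # lookup(b'1') returns b'b' (offset into the series), lookup(b'b+1')
    # returns b'c', and lookup(b'c-2') returns b'a'.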
    def lookup(self, patch, strict=False):
        def partialname(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_(b'patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(b' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == b'qtip':
                    return self.series[self.seriesend(True) - 1]
                if s == b'qbase':
                    return self.series[0]
            return None

        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

        if not strict:
            res = partialname(patch)
            if res:
                return res
            minus = patch.rfind(b'-')
            if minus >= 0:
                res = partialname(patch[:minus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[minus + 1 :] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i - off >= 0:
                            return self.series[i - off]
            plus = patch.rfind(b'+')
            if plus >= 0:
                res = partialname(patch[:plus])
                if res:
                    i = self.series.index(res)
                    try:
                        off = int(patch[plus + 1 :] or 1)
                    except (ValueError, OverflowError):
                        pass
                    else:
                        if i + off < len(self.series):
                            return self.series[i + off]
        raise error.Abort(_(b"patch %s not in series") % patch)

    def push(
        self,
        repo,
        patch=None,
        force=False,
        list=False,
        mergeq=None,
        all=False,
        move=False,
        exact=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        diffopts = self.diffopts()
        with repo.wlock():
            heads = []
            for hs in repo.branchmap().iterheads():
                heads.extend(hs)
            if not heads:
                heads = [repo.nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_(b"(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_(b'no patches in series\n'))
                return 0

            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward);
            # qpush B is a NOP (no change); qpush A is an error (can't
            # go backwards with qpush).
            if patch:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _(b'qpush: %s is already at the top\n') % patch
                    )
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise error.Abort(
                            _(b"cannot push to a previous patch: %s") % patch
                        )
                else:
                    if reason:
                        reason = _(b'guarded by %s') % reason
                    else:
                        reason = _(b'no matching guards')
                    self.ui.warn(
                        _(b"cannot push '%s' - %s\n") % (patch, reason)
                    )
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_(b'all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_(b'patch series already fully applied\n'))
                return 1
            if not force and not keepchanges:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                if keepchanges:
                    raise error.Abort(
                        _(b"cannot use --exact and --keep-changes together")
                    )
                if move:
                    raise error.Abort(
                        _(b'cannot use --exact and --move together')
                    )
                if self.applied:
                    raise error.Abort(
                        _(b'cannot push --exact with applied patches')
                    )
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise error.Abort(
                        _(b"%s does not have a parent recorded") % root
                    )
                if not repo[target] == repo[b'.']:
                    hg.update(repo, target)

            if move:
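                # relocate the patch's entry in fullseries so that it becomes
                # the next patch to be applied, then re-derive the filtered
                # series from it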
                if not patch:
                    raise error.Abort(_(b"please specify the patch to move"))
                for fullstart, rpn in enumerate(self.fullseries):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == self.series[start]:
                        break
                for i, rpn in enumerate(self.fullseries[fullstart:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = fullstart + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(fullstart, fullpatch)
                self.parseseries()
                self.seriesdirty = True

            self.applieddirty = True
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            tobackup = set()
            if (not nobackup and force) or keepchanges:
                status = self.checklocalchanges(repo, force=True)
                if keepchanges:
                    tobackup.update(
                        status.modified
                        + status.added
                        + status.removed
                        + status.deleted
                    )
                else:
                    tobackup.update(status.modified + status.added)

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(
                        repo,
                        s,
                        list,
                        all_files=all_files,
                        tobackup=tobackup,
                        keepchanges=keepchanges,
                    )
            except AbortNoCleanup:
                raise
            except:  # re-raises
                self.ui.warn(_(b'cleaning up working directory...\n'))
                cmdutil.revert(
                    self.ui,
                    repo,
                    repo[b'.'],
                    no_backup=True,
                )
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        repo.wvfs.unlinkpath(f, ignoremissing=True)
                self.ui.warn(_(b'done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
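            # error codes from _apply(): 1 means an unreadable patch (no
            # commit was created), 2 means rejects were left in the working
            # directory, 3 means fuzz under --strict; only 2 and 3 leave a
            # partially applied patch that needs fixing and 'hg qrefresh'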
            if ret[0] and ret[0] > 1:
                msg = _(b"errors during apply, please fix and qrefresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_(b"now at: %s\n") % top)
            return ret[0]

    def pop(
        self,
        repo,
        patch=None,
        force=False,
        update=True,
        all=False,
        nobackup=False,
        keepchanges=False,
    ):
        self.checkkeepchanges(keepchanges, force)
        with repo.wlock():
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                    info = self.isapplied(patch)
                if not info:
                    raise error.Abort(_(b"patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_(b"no patches applied\n"))
                return not all

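            # pick the first applied entry to pop: everything (qpop -a),
            # everything above the named patch so that it becomes the new
            # top, or just the topmost patch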
1764 if all:
1762 if all:
1765 start = 0
1763 start = 0
1766 elif patch:
1764 elif patch:
1767 start = info[0] + 1
1765 start = info[0] + 1
1768 else:
1766 else:
1769 start = len(self.applied) - 1
1767 start = len(self.applied) - 1
1770
1768
1771 if start >= len(self.applied):
1769 if start >= len(self.applied):
1772 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1770 self.ui.warn(_(b"qpop: %s is already at the top\n") % patch)
1773 return
1771 return
1774
1772
1775 if not update:
1773 if not update:
1776 parents = repo.dirstate.parents()
1774 parents = repo.dirstate.parents()
1777 rr = [x.node for x in self.applied]
1775 rr = [x.node for x in self.applied]
1778 for p in parents:
1776 for p in parents:
1779 if p in rr:
1777 if p in rr:
1780 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1778 self.ui.warn(_(b"qpop: forcing dirstate update\n"))
1781 update = True
1779 update = True
1782 else:
1780 else:
1783 parents = [p.node() for p in repo[None].parents()]
1781 parents = [p.node() for p in repo[None].parents()]
1784 update = any(
1782 update = any(
1785 entry.node in parents for entry in self.applied[start:]
1783 entry.node in parents for entry in self.applied[start:]
1786 )
1784 )
1787
1785
1788 tobackup = set()
1786 tobackup = set()
1789 if update:
1787 if update:
1790 s = self.checklocalchanges(repo, force=force or keepchanges)
1788 s = self.checklocalchanges(repo, force=force or keepchanges)
1791 if force:
1789 if force:
1792 if not nobackup:
1790 if not nobackup:
1793 tobackup.update(s.modified + s.added)
1791 tobackup.update(s.modified + s.added)
1794 elif keepchanges:
1792 elif keepchanges:
1795 tobackup.update(
1793 tobackup.update(
1796 s.modified + s.added + s.removed + s.deleted
1794 s.modified + s.added + s.removed + s.deleted
1797 )
1795 )
1798
1796
1799 self.applieddirty = True
1797 self.applieddirty = True
1800 end = len(self.applied)
1798 end = len(self.applied)
1801 rev = self.applied[start].node
1799 rev = self.applied[start].node
1802
1800
1803 try:
1801 try:
1804 heads = repo.changelog.heads(rev)
1802 heads = repo.changelog.heads(rev)
1805 except error.LookupError:
1803 except error.LookupError:
1806 node = short(rev)
1804 node = short(rev)
1807 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1805 raise error.Abort(_(b'trying to pop unknown node %s') % node)
1808
1806
1809 if heads != [self.applied[-1].node]:
1807 if heads != [self.applied[-1].node]:
1810 raise error.Abort(
1808 raise error.Abort(
1811 _(
1809 _(
1812 b"popping would remove a revision not "
1810 b"popping would remove a revision not "
1813 b"managed by this patch queue"
1811 b"managed by this patch queue"
1814 )
1812 )
1815 )
1813 )
1816 if not repo[self.applied[-1].node].mutable():
1814 if not repo[self.applied[-1].node].mutable():
1817 raise error.Abort(
1815 raise error.Abort(
1818 _(b"popping would remove a public revision"),
1816 _(b"popping would remove a public revision"),
1819 hint=_(b"see 'hg help phases' for details"),
1817 hint=_(b"see 'hg help phases' for details"),
1820 )
1818 )
1821
1819
1822 # we know there are no local changes, so we can make a simplified
1820 # we know there are no local changes, so we can make a simplified
1823 # form of hg.update.
1821 # form of hg.update.
        if update:
            qp = self.qparents(repo, rev)
            ctx = repo[qp]
            st = repo.status(qp, b'.')
            m, a, r, d = st.modified, st.added, st.removed, st.deleted
            if d:
                raise error.Abort(_(b"deletions found between repo revs"))

            tobackup = set(a + m + r) & tobackup
            if keepchanges and tobackup:
                raise error.Abort(_(b"local changes found, qrefresh first"))
            self.backup(repo, tobackup)
            with repo.dirstate.changing_parents(repo):
                for f in a:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                    repo.dirstate.update_file(
                        f, p1_tracked=False, wc_tracked=False
                    )
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.update_file(
                        f, p1_tracked=True, wc_tracked=True
                    )
                repo.setparents(qp, repo.nullid)
        for patch in reversed(self.applied[start:end]):
            self.ui.status(_(b"popping %s\n") % patch.name)
        del self.applied[start:end]
        strip(self.ui, repo, [rev], update=False, backup=False)
        for s, state in repo[b'.'].substate.items():
            repo[b'.'].sub(s).get(state)
        if self.applied:
            self.ui.write(_(b"now at: %s\n") % self.applied[-1].name)
        else:
            self.ui.write(_(b"patch queue now empty\n"))

    def diff(self, repo, pats, opts):
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_(b"no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get(b'reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)

    def refresh(self, repo, pats=None, **opts):
        opts = pycompat.byteskwargs(opts)
        if not self.applied:
            self.ui.write(_(b"no patches applied\n"))
            return 1
        msg = opts.get(b'msg', b'').rstrip()
        edit = opts.get(b'edit')
        editform = opts.get(b'editform', b'mq.qrefresh')
        newuser = opts.get(b'user')
        newdate = opts.get(b'date')
        if newdate:
            newdate = b'%d %d' % dateutil.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise error.Abort(
                    _(b"cannot qrefresh a revision with children")
                )
            if not repo[top].mutable():
                raise error.Abort(
                    _(b"cannot qrefresh public revision"),
                    hint=_(b"see 'hg help phases' for details"),
                )

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)

            inclsubs = checksubstate(repo, patchparent)
            if inclsubs:
                substatestate = repo.dirstate.get_entry(b'.hgsubstate')

            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts(
                {b'git': opts.get(b'git')}, patchfn, plain=True
            )
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, b'w', atomictemp=True)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   st = repo.status(top, patchparent)
            # but we do it backwards to take advantage of manifest/changelog
            # caching against the next repo.status call
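            # (With the arguments in this order, the result describes
            # patchparent -> top, so mm/aa/dd below are the files that the
            # topmost patch itself modifies, adds and removes.)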
            st = repo.status(patchparent, top)
            mm, aa, dd = st.modified, st.added, st.removed
            ctx = repo[top]
            aaa = aa[:]
            match1 = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
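            # (e.g. "hg qrefresh --short foo.c" folds only foo.c plus files
            # already in the patch into it, leaving other local changes in
            # the working copy; illustrative file name)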
            if opts.get(b'short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + match1.files())
                # filter with include/exclude options
                match1 = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            stb = repo.status(match=match)
            m, a, r, d = stb.modified, stb.added, stb.removed, stb.deleted
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

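            # Net effect, e.g.: a file added by the patch but deleted in the
            # working copy drops out of the patch entirely (and is forgotten),
            # while a file both added by the patch and modified locally stays
            # in the "added" column only.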
            m = list(mm)
            r = list(dd)
            a = list(aa)

            # create 'match' that includes the files to be recommitted.
            # apply match1 via repo.status to ensure correct case handling.
            st = repo.status(patchparent, match=match1)
            cm, ca, cr, cd = st.modified, st.added, st.removed, st.deleted
            allmatches = set(cm + ca + cr + cd)
            refreshchanges = [x.intersection(allmatches) for x in (mm, aa, dd)]

            files = set(inclsubs)
            for x in refreshchanges:
                files.update(x)
            match = scmutil.matchfiles(repo, files)

            bmlist = repo[top].bookmarks()

            with repo.dirstate.changing_parents(repo):
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.update_file(
                            dst, p1_tracked=False, wc_tracked=True
                        )
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        src = ctx[dst].copysource()
                        if src:
                            copies.setdefault(src, []).extend(
                                copies.get(dst, [])
                            )
                            if dst in a:
                                copies[src].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.items():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.update_file(
                            dst, p1_tracked=False, wc_tracked=True
                        )
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.update_file_p1(f, p1_tracked=True)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in range(len(m) - 1, -1, -1):
                    if not match1(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.update_file_p1(f, p1_tracked=True)
                for f in mm:
                    repo.dirstate.update_file_p1(f, p1_tracked=True)
                for f in forget:
                    repo.dirstate.update_file_p1(f, p1_tracked=False)

                user = ph.user or ctx.user()

                oldphase = repo[top].phase()

                # assumes strip can roll itself back if interrupted
                repo.setparents(*cparents)
                repo.dirstate.write(repo.currenttransaction())
            self.applied.pop()
            self.applieddirty = True
            strip(self.ui, repo, [top], update=False, backup=False)

            try:
                # might be nice to attempt to roll back strip after this

                defaultmsg = b"[mq]: %s" % patchfn
                editor = cmdutil.getcommiteditor(editform=editform)
                if edit:

                    def finishdesc(desc):
                        if desc.rstrip():
                            ph.setmessage(desc)
                            return desc
                        return defaultmsg

                    # i18n: this message is shown in editor with "HG: " prefix
                    extramsg = _(b'Leave message empty to use default message.')
                    editor = cmdutil.getcommiteditor(
                        finishdesc=finishdesc,
                        extramsg=extramsg,
                        editform=editform,
                    )
                    message = msg or b"\n".join(ph.message)
                elif not msg:
                    if not ph.message:
                        message = defaultmsg
                    else:
                        message = b"\n".join(ph.message)
                else:
                    message = msg
                    ph.setmessage(msg)

                # Ensure we create a new changeset in the same phase as
                # the old one.
                lock = tr = None
                try:
                    lock = repo.lock()
                    tr = repo.transaction(b'mq')
                    n = newcommit(
                        repo,
                        oldphase,
                        message,
                        user,
                        ph.date,
                        match=match,
                        force=True,
                        editor=editor,
                    )
                    # only write patch after a successful commit
                    c = [list(x) for x in refreshchanges]
                    if inclsubs:
                        self.putsubstate2changes(substatestate, c)
                    chunks = patchmod.diff(
                        repo, patchparent, changes=c, opts=diffopts
                    )
                    comments = bytes(ph)
                    if comments:
                        patchf.write(comments)
                    for chunk in chunks:
                        patchf.write(chunk)
                    patchf.close()

                    marks = repo._bookmarks
                    marks.applychanges(repo, tr, [(bm, n) for bm in bmlist])
                    tr.close()

                    self.applied.append(statusentry(n, patchfn))
                finally:
                    lockmod.release(tr, lock)
            except:  # re-raises
                with repo.dirstate.changing_parents(repo):
                    ctx = repo[cparents[0]]
                    repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(
                    _(
                        b'qrefresh interrupted while patch was popped! '
                        b'(revert --all, qpush to recover)\n'
                    )
                )
                raise
        finally:
            wlock.release()
            self.removeundo(repo)

    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise error.Abort(_(b"patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except FileExistsError:
            if not create:
                raise
        if create:
            return self.qrepo(create=True)

    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in range(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied

    def qseries(
        self,
        repo,
        missing=None,
        start=0,
        length=None,
        status=None,
        summary=False,
    ):
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                if ph.message:
                    msg = ph.message[0]
                else:
                    msg = b''

                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = stringutil.ellipsis(msg, width)
                    else:
                        msg = b''
                self.ui.write(patchname, label=b'qseries.' + state)
                self.ui.write(b': ')
                self.ui.write(msg, label=b'qseries.message.' + state)
            else:
                self.ui.write(patchname, label=b'qseries.' + state)
            self.ui.write(b'\n')

        applied = {p.name for p in self.applied}
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(b"%d" % (start + length - 1))
            for i in range(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = b'A', b'applied'
                elif self.pushable(i)[0]:
                    char, state = b'U', b'unapplied'
                else:
                    char, state = b'G', b'guarded'
                pfx = b''
                if self.ui.verbose:
                    pfx = b'%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1 :]
                for f in files:
                    fl = os.path.join(d, f)
                    if (
                        fl not in self.series
                        and fl
                        not in (
                            self.statuspath,
                            self.seriespath,
                            self.guardspath,
                        )
                        and not fl.startswith(b'.')
                    ):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and b'D ' or b''
                displayname(pfx, x, b'missing')

    def issaveline(self, l):
        if l.name == b'.hg.patches.save.line':
            return True

    def qrepo(self, create=False):
        ui = self.baseui.copy()
        # copy back attributes set by ui.pager()
        if self.ui.pageractive and not ui.pageractive:
            ui.pageractive = self.ui.pageractive
            # internal config: ui.formatted
            ui.setconfig(
                b'ui',
                b'formatted',
                self.ui.config(b'ui', b'formatted'),
                b'mqpager',
            )
            ui.setconfig(
                b'ui',
                b'interactive',
                self.ui.config(b'ui', b'interactive'),
                b'mqpager',
            )
        if create or os.path.isdir(self.join(b".hg")):
            return hg.repository(ui, path=self.path, create=create)

    def restore(self, repo, rev, delete=None, qupdate=None):
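        # The description parsed here is the one written by save() below;
        # it looks like (illustrative):
        #   hg patches saved state
        #   Dirstate: <p1 hex> <p2 hex>
        #
        #   Patch Data:
        #   <node hex>:applied-patch-name
        #   :unapplied-series-line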
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == b'Patch Data:':
                datastart = i + 1
            elif line.startswith(b'Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(b' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(b':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_(b"no saved patch data found\n"))
            return 1
        self.ui.warn(_(b"restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = True
        self.applieddirty = True
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_(b"save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_(b"removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                strip(self.ui, repo, [rev], update=update, backup=False)
        if qpp:
            self.ui.warn(
                _(b"saved queue repository parents: %s %s\n")
                % (short(qpp[0]), short(qpp[1]))
            )
            if qupdate:
                self.ui.status(_(b"updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_(b"unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        if not self.applied:
            self.ui.warn(_(b"save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_(b"status is already saved\n"))
            return 1

        if not msg:
            msg = _(b"hg patches saved state")
        else:
            msg = b"hg patches: " + msg.rstrip(b'\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += b"\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += b"\n\nPatch Data:\n"
        msg += b''.join(b'%s\n' % x for x in self.applied)
        msg += b''.join(b':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_(b"repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, b'.hg.patches.save.line'))
        self.applieddirty = True
        self.removeundo(repo)

    def fullseriesend(self):
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0

    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0

        def nextpatch(start):
            if all_patches or start >= len(self.series):
                return start
            for i in range(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    return i
                self.explainpushable(i)
            return len(self.series)

        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return nextpatch(end + 1)
        return nextpatch(end)

    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = (b"%d" % self.series.index(pname)) + b" " + pname
        return p

    def qimport(
        self,
        repo,
        files,
        patchname=None,
        rev=None,
        existing=None,
        force=None,
        git=False,
    ):
        def checkseries(patchname):
            if patchname in self.series:
                raise error.Abort(
                    _(b'patch %s is already in the series file') % patchname
                )

        if rev:
            if files:
                raise error.Abort(
                    _(b'option "-r" not valid when importing files')
                )
            rev = logcmdutil.revrange(repo, rev)
            rev.sort(reverse=True)
        elif not files:
            raise error.Abort(_(b'no files or revisions specified'))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise error.Abort(
                _(b'option "-n" not valid when importing multiple patches')
            )
        imported = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
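            # (i.e. with patches applied, only a contiguous run of revisions
            # ending at qbase's parent can be imported, newest first; the
            # checks below enforce that linearity)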
            heads = repo.changelog.heads(repo.changelog.node(rev.first()))
            if len(heads) > 1:
                raise error.Abort(
                    _(b'revision %d is the root of more than one branch')
                    % rev.last()
                )
            if self.applied:
                base = repo.changelog.node(rev.first())
                if base in [n.node for n in self.applied]:
                    raise error.Abort(
                        _(b'revision %d is already managed') % rev.first()
                    )
                if heads != [self.applied[-1].node]:
                    raise error.Abort(
                        _(b'revision %d is not the parent of the queue')
                        % rev.first()
                    )
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev.first())]:
                    raise error.Abort(
                        _(b'revision %d has unmanaged children') % rev.first()
                    )
                lastparent = None

            diffopts = self.diffopts({b'git': git})
            with repo.transaction(b'qimport') as tr:
                for r in rev:
                    if not repo[r].mutable():
                        raise error.Abort(
                            _(b'revision %d is not mutable') % r,
                            hint=_(b"see 'hg help phases' " b'for details'),
                        )
                    p1, p2 = repo.changelog.parentrevs(r)
                    n = repo.changelog.node(r)
                    if p2 != nullrev:
                        raise error.Abort(
                            _(b'cannot import merge revision %d') % r
                        )
                    if lastparent and lastparent != r:
                        raise error.Abort(
                            _(b'revision %d is not the parent of %d')
                            % (r, lastparent)
                        )
                    lastparent = p1

                    if not patchname:
                        patchname = self.makepatchname(
                            repo[r].description().split(b'\n', 1)[0],
                            b'%d.diff' % r,
                        )
                    checkseries(patchname)
                    self.checkpatchname(patchname, force)
                    self.fullseries.insert(0, patchname)

                    with self.opener(patchname, b"w") as fp:
                        cmdutil.exportfile(repo, [n], fp, opts=diffopts)

                    se = statusentry(n, patchname)
                    self.applied.insert(0, se)

                    self.added.append(patchname)
                    imported.append(patchname)
                    patchname = None
                if rev and repo.ui.configbool(b'mq', b'secret'):
                    # if we added anything with --rev, move the secret root
                    phases.retractboundary(repo, tr, phases.secret, [n])
                self.parseseries()
                self.applieddirty = True
                self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == b'-':
                    raise error.Abort(
                        _(b'-e is incompatible with import from -')
                    )
                filename = normname(filename)
                self.checkreservedname(filename)
                if urlutil.url(filename).islocal():
                    originpath = self.join(filename)
                    if not os.path.isfile(originpath):
                        raise error.Abort(
                            _(b"patch %s does not exist") % filename
                        )

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(
                        _(b'renaming %s to %s\n') % (filename, patchname)
                    )
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == b'-' and not patchname:
                    raise error.Abort(
                        _(b'need --name to import a patch from -')
                    )
                elif not patchname:
                    patchname = normname(
                        os.path.basename(filename.rstrip(b'/'))
                    )
                self.checkpatchname(patchname, force)
                try:
                    if filename == b'-':
                        text = self.ui.fin.read()
                    else:
                        fp = hg.openpath(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise error.Abort(_(b"unable to read file %s") % filename)
                patchf = self.opener(patchname, b"w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
                self.parseseries()
                self.seriesdirty = True
            self.ui.warn(_(b"adding %s to series file\n") % patchname)
            self.added.append(patchname)
            imported.append(patchname)
            patchname = None

        self.removeundo(repo)
        return imported


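# With "keepchanges = True" in the [mq] config section, qpush/qpop/qgoto
# behave as if --keep-changes were passed; an explicit --force or --exact
# still wins, as fixkeepchangesopts() below implements. Illustrative hgrc:
#   [mq]
#   keepchanges = True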
def fixkeepchangesopts(ui, opts):
    if (
        not ui.configbool(b'mq', b'keepchanges')
        or opts.get(b'force')
        or opts.get(b'exact')
    ):
        return opts
    opts = dict(opts)
    opts[b'keep_changes'] = True
    return opts


@command(
    b"qdelete|qremove|qrm",
    [
        (b'k', b'keep', None, _(b'keep patch file')),
        (
            b'r',
            b'rev',
            [],
            _(b'stop managing a revision (DEPRECATED)'),
            _(b'REV'),
        ),
    ],
    _(b'hg qdelete [-k] [PATCH]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. Exact
    patch identifiers must be given. With -k/--keep, the patch files are
    preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
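    # e.g. "hg qdelete -k old-fix.patch" drops old-fix.patch from the series
    # but keeps its file in the patch directory (illustrative patch name)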
    q = repo.mq
    q.delete(repo, patches, pycompat.byteskwargs(opts))
    q.savedirty()
    return 0


@command(
    b"qapplied",
    [(b'1', b'last', None, _(b'show only the preceding applied patch'))]
    + seriesopts,
    _(b'hg qapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)

    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    if opts.get(b'last') and not end:
        ui.write(_(b"no patches applied\n"))
        return 1
    elif opts.get(b'last') and end == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    elif opts.get(b'last'):
        start = end - 2
        end = 1
    else:
        start = 0

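    # (with --last, the call below prints a single entry starting at the
    # patch applied just before the topmost one)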
    q.qseries(
        repo, length=end, start=start, status=b'A', summary=opts.get(b'summary')
    )


@command(
    b"qunapplied",
    [(b'1', b'first', None, _(b'show only the first patch'))] + seriesopts,
    _(b'hg qunapplied [-1] [-s] [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    if patch:
        if patch not in q.series:
            raise error.Abort(_(b"patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get(b'first'):
        ui.write(_(b"all patches applied\n"))
        return 1

    if opts.get(b'first'):
        length = 1
    else:
        length = None
    q.qseries(
        repo,
        start=start,
        length=length,
        status=b'U',
        summary=opts.get(b'summary'),
    )


@command(
    b"qimport",
    [
        (b'e', b'existing', None, _(b'import file in patch directory')),
        (b'n', b'name', b'', _(b'name of patch file'), _(b'NAME')),
        (b'f', b'force', None, _(b'overwrite existing files')),
        (
            b'r',
            b'rev',
            [],
            _(b'place existing revisions under mq control'),
            _(b'REV'),
        ),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'P', b'push', None, _(b'qpush after importing')),
    ],
    _(b'hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... [FILE]...'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def qimport(ui, repo, *filename, **opts):
    """import a patch or existing changeset

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev . -n patch will place the current revision
    under mq control). With -g/--git, patches imported with --rev will
    use the git diff format. See the diffs help topic for information
    on why this is important for preserving rename/copy information
    and permission changes. Use :hg:`qfinish` to remove changesets
    from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

      hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    opts = pycompat.byteskwargs(opts)
    with repo.lock():  # because this may move phases
        q = repo.mq
        try:
            imported = q.qimport(
                repo,
                filename,
                patchname=opts.get(b'name'),
                existing=opts.get(b'existing'),
                force=opts.get(b'force'),
                rev=opts.get(b'rev'),
                git=opts.get(b'git'),
            )
        finally:
            q.savedirty()

    if imported and opts.get(b'push') and not opts.get(b'rev'):
        return q.push(repo, imported[-1])
    return 0


def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        with r.wlock(), r.dirstate.changing_files(r):
            if not os.path.exists(r.wjoin(b'.hgignore')):
                fp = r.wvfs(b'.hgignore', b'w')
                fp.write(b'^\\.hg\n')
                fp.write(b'^\\.mq\n')
                fp.write(b'syntax: glob\n')
                fp.write(b'status\n')
                fp.write(b'guards\n')
                fp.close()
            if not os.path.exists(r.wjoin(b'series')):
                r.wvfs(b'series', b'w').close()
            r[None].add([b'.hgignore', b'series'])
        commands.add(ui, r)
    return 0


@command(
    b"qinit",
    [(b'c', b'create-repo', None, _(b'create queue repository'))],
    _(b'hg qinit [-c]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    helpbasic=True,
)
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    return qinit(ui, repo, create=opts.get('create_repo'))


@command(
    b"qclone",
    [
        (b'', b'pull', None, _(b'use pull protocol to copy metadata')),
        (
            b'U',
            b'noupdate',
            None,
            _(b'do not update the new working directories'),
        ),
        (
            b'',
            b'uncompressed',
            None,
            _(b'use uncompressed transfer (fast over LAN)'),
        ),
        (
            b'p',
            b'patches',
            b'',
            _(b'location of source patch repository'),
            _(b'REPO'),
        ),
    ]
    + cmdutil.remoteopts,
    _(b'hg qclone [OPTION]... SOURCE [DEST]'),
    helpcategory=command.CATEGORY_REPO_CREATION,
    norepo=True,
)
def clone(ui, source, dest=None, **opts):
    """clone main and patch repository at same time

2830 If source is local, destination will have no patches applied. If
2828 If source is local, destination will have no patches applied. If
2831 source is remote, this command can not check if patches are
2829 source is remote, this command can not check if patches are
2832 applied in source, so cannot guarantee that patches are not
2830 applied in source, so cannot guarantee that patches are not
2833 applied in destination. If you clone remote repository, be sure
2831 applied in destination. If you clone remote repository, be sure
2834 before that it has no patches applied.
2832 before that it has no patches applied.
2835
2833
2836 Source patch repository is looked for in <src>/.hg/patches by
2834 Source patch repository is looked for in <src>/.hg/patches by
2837 default. Use -p <url> to change.
2835 default. Use -p <url> to change.
2838
2836
2839 The patch directory must be a nested Mercurial repository, as
2837 The patch directory must be a nested Mercurial repository, as
2840 would be created by :hg:`init --mq`.
2838 would be created by :hg:`init --mq`.
2841
2839
2842 Return 0 on success.
2840 Return 0 on success.
2843 """
2841 """
2844 opts = pycompat.byteskwargs(opts)
2842 opts = pycompat.byteskwargs(opts)

    def patchdir(repo):
        """compute a patch repo url from a repo object"""
        url = repo.url()
        if url.endswith(b'/'):
            url = url[:-1]
        return url + b'/.hg/patches'

    # main repo (destination and sources)
    if dest is None:
        dest = hg.defaultdest(source)
    source_path = urlutil.get_clone_path_obj(ui, source)
    sr = hg.peer(ui, opts, source_path)

    # patches repo (source only)
    if opts.get(b'patches'):
        patches_path = urlutil.get_clone_path_obj(ui, opts.get(b'patches'))
    else:
        # XXX path: we should turn this into a path object
        patches_path = patchdir(sr)
    try:
        hg.peer(ui, opts, patches_path)
    except error.RepoError:
        raise error.Abort(
            _(b'versioned patch repository not found (see init --mq)')
        )
    qbase, destrev = None, None
    if sr.local():
        repo = sr.local()
        if repo.mq.applied and repo[qbase].phase() != phases.secret:
            qbase = repo.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(repo.heads())
                destrev = list(heads.difference(repo.heads(qbase)))
                destrev.append(repo.changelog.parents(qbase)[0])
    elif sr.capable(b'lookup'):
        try:
            qbase = sr.lookup(b'qbase')
        except error.RepoError:
            pass

    ui.note(_(b'cloning main repository\n'))
    sr, dr = hg.clone(
        ui,
        opts,
        sr.url(),
        dest,
        pull=opts.get(b'pull'),
        revs=destrev,
        update=False,
        stream=opts.get(b'uncompressed'),
    )

    ui.note(_(b'cloning patch repository\n'))
    hg.clone(
        ui,
        opts,
        opts.get(b'patches') or patchdir(sr),
        patchdir(dr),
        pull=opts.get(b'pull'),
        update=not opts.get(b'noupdate'),
        stream=opts.get(b'uncompressed'),
    )

    if dr.local():
        repo = dr.local()
        if qbase:
            ui.note(
                _(
                    b'stripping applied patches from destination '
                    b'repository\n'
                )
            )
            strip(ui, repo, [qbase], update=False, backup=None)
        if not opts.get(b'noupdate'):
            ui.note(_(b'updating destination repository\n'))
            hg.update(repo, repo.changelog.tip())
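
# An illustrative qclone session (repository names here are hypothetical,
# not taken from the original source): given a local repository "proj"
# whose patch queue was created with ``hg init --mq``, both repositories
# can be cloned in one step:
#
#   $ hg qclone proj proj-clone               # patches from proj/.hg/patches
#   $ hg qclone -p ~/patches proj proj-clone  # explicit patch repo via -p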
2922
2920
2923
2921
2924 @command(
2922 @command(
2925 b"qcommit|qci",
2923 b"qcommit|qci",
2926 commands.table[b"commit|ci"][1],
2924 commands.table[b"commit|ci"][1],
2927 _(b'hg qcommit [OPTION]... [FILE]...'),
2925 _(b'hg qcommit [OPTION]... [FILE]...'),
2928 helpcategory=command.CATEGORY_COMMITTING,
2926 helpcategory=command.CATEGORY_COMMITTING,
2929 inferrepo=True,
2927 inferrepo=True,
2930 )
2928 )
2931 def commit(ui, repo, *pats, **opts):
2929 def commit(ui, repo, *pats, **opts):
2932 """commit changes in the queue repository (DEPRECATED)
2930 """commit changes in the queue repository (DEPRECATED)
2933
2931
2934 This command is deprecated; use :hg:`commit --mq` instead."""
2932 This command is deprecated; use :hg:`commit --mq` instead."""
2935 q = repo.mq
2933 q = repo.mq
2936 r = q.qrepo()
2934 r = q.qrepo()
2937 if not r:
2935 if not r:
2938 raise error.Abort(b'no queue repository')
2936 raise error.Abort(b'no queue repository')
2939 commands.commit(r.ui, r, *pats, **opts)
2937 commands.commit(r.ui, r, *pats, **opts)


@command(
    b"qseries",
    [
        (b'm', b'missing', None, _(b'print patches not in series')),
    ]
    + seriesopts,
    _(b'hg qseries [-ms]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(
        repo, missing=opts.get('missing'), summary=opts.get('summary')
    )
    return 0


@command(
    b"qtop",
    seriesopts,
    _(b'hg qtop [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    if q.applied:
        t = q.seriesend(True)
    else:
        t = 0

    if t:
        q.qseries(
            repo,
            start=t - 1,
            length=1,
            status=b'A',
            summary=opts.get('summary'),
        )
    else:
        ui.write(_(b"no patches applied\n"))
        return 1


@command(
    b"qnext",
    seriesopts,
    _(b'hg qnext [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def next(ui, repo, **opts):
    """print the name of the next pushable patch

    Returns 0 on success."""
    q = repo.mq
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_(b"all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))


@command(
    b"qprev",
    seriesopts,
    _(b'hg qprev [-s]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def prev(ui, repo, **opts):
    """print the name of the preceding applied patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_(b"only one patch applied\n"))
        return 1
    if not l:
        ui.write(_(b"no patches applied\n"))
        return 1
    idx = q.series.index(q.applied[-2].name)
    q.qseries(
        repo, start=idx, length=1, status=b'A', summary=opts.get('summary')
    )


def setupheaderopts(ui, opts):
    if not opts.get(b'user') and opts.get(b'currentuser'):
        opts[b'user'] = ui.username()
    if not opts.get(b'date') and opts.get(b'currentdate'):
        opts[b'date'] = b"%d %d" % dateutil.makedate()
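
# Rough sketch of the effect of setupheaderopts (the concrete values are
# illustrative): with -U/--currentuser and -D/--currentdate given and no
# explicit -u/-d, the opts dict gains
#
#   opts[b'user'] = ui.username()    # e.g. b'Jane <jane@example.com>'
#   opts[b'date'] = b"%d %d" % dateutil.makedate()    # e.g. b'1700000000 0'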


@command(
    b"qnew",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'f', b'force', None, _(b'import uncommitted changes (DEPRECATED)')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (b'U', b'currentuser', None, _(b'add "From: <current user>" to patch')),
        (b'u', b'user', b'', _(b'add "From: <USER>" to patch'), _(b'USER')),
        (b'D', b'currentdate', None, _(b'add "Date: <current date>" to patch')),
        (b'd', b'date', b'', _(b'add "Date: <DATE>" to patch'), _(b'DATE')),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    opts = pycompat.byteskwargs(opts)
    msg = cmdutil.logmessage(ui, opts)
    q = repo.mq
    opts[b'msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **pycompat.strkwargs(opts))
    q.savedirty()
    return 0
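
# Illustrative qnew invocations (patch and file names are hypothetical):
#
#   $ hg qnew fix-frob.patch                # empty patch on top of qtip
#   $ hg qnew -g -U -m 'fix frob' f.patch   # git format, current user
#   $ hg qnew -I src/ split.patch           # only changes under src/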


@command(
    b"qrefresh",
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'g', b'git', None, _(b'use git extended diff format')),
        (
            b's',
            b'short',
            None,
            _(b'refresh only files already in the patch and specified files'),
        ),
        (
            b'U',
            b'currentuser',
            None,
            _(b'add/update author field in patch with current user'),
        ),
        (
            b'u',
            b'user',
            b'',
            _(b'add/update author field in patch with given user'),
            _(b'USER'),
        ),
        (
            b'D',
            b'currentdate',
            None,
            _(b'add/update date field in patch with current date'),
        ),
        (
            b'd',
            b'date',
            b'',
            _(b'add/update date field in patch with given date'),
            _(b'DATE'),
        ),
    ]
    + cmdutil.walkopts
    + cmdutil.commitopts,
    _(b'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
    helpcategory=command.CATEGORY_COMMITTING,
    helpbasic=True,
    inferrepo=True,
)
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    setupheaderopts(ui, opts)
    with repo.wlock():
        ret = q.refresh(repo, pats, msg=message, **pycompat.strkwargs(opts))
        q.savedirty()
        return ret
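
# Illustrative qrefresh invocations (file names are hypothetical):
#
#   $ hg qrefresh               # fold all working-dir changes into qtip
#   $ hg qrefresh -s util.py    # files already in the patch, plus util.py
#   $ hg qrefresh -e            # also re-edit the patch message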


@command(
    b"qdiff",
    cmdutil.diffopts + cmdutil.diffopts2 + cmdutil.walkopts,
    _(b'hg qdiff [OPTION]... [FILE]...'),
    helpcategory=command.CATEGORY_FILE_CONTENTS,
    helpbasic=True,
    inferrepo=True,
)
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    ui.pager(b'qdiff')
    repo.mq.diff(repo, pats, pycompat.byteskwargs(opts))
    return 0


@command(
    b'qfold',
    [
        (b'e', b'edit', None, _(b'invoke editor on commit messages')),
        (b'k', b'keep', None, _(b'keep folded patch files')),
    ]
    + cmdutil.commitopts,
    _(b'hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'),
    helpcategory=command.CATEGORY_CHANGE_MANAGEMENT,
)
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    q = repo.mq
    if not files:
        raise error.Abort(_(b'qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise error.Abort(_(b'no patches applied'))

    with repo.wlock():
        q.checklocalchanges(repo)

        message = cmdutil.logmessage(ui, opts)

        parent = q.lookup(b'qtip')
        patches = []
        messages = []
        for f in files:
            p = q.lookup(f)
            if p in patches or p == parent:
                ui.warn(_(b'skipping already folded patch %s\n') % p)
            if q.isapplied(p):
                raise error.Abort(
                    _(b'qfold cannot fold already applied patch %s') % p
                )
            patches.append(p)

        for p in patches:
            if not message:
                ph = patchheader(q.join(p), q.plainmode)
                if ph.message:
                    messages.append(ph.message)
            pf = q.join(p)
            (patchsuccess, files, fuzz) = q.patch(repo, pf)
            if not patchsuccess:
                raise error.Abort(_(b'error folding patch %s') % p)

        if not message:
            ph = patchheader(q.join(parent), q.plainmode)
            message = ph.message
        for msg in messages:
            if msg:
                if message:
                    message.append(b'* * *')
                message.extend(msg)
        message = b'\n'.join(message)

        diffopts = q.patchopts(q.diffopts(), *patches)
        q.refresh(
            repo,
            msg=message,
            git=diffopts.git,
            edit=opts.get(b'edit'),
            editform=b'mq.qfold',
        )
        q.delete(repo, patches, opts)
        q.savedirty()
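
# Sketch of the folded message (patch names and text are hypothetical):
# folding b.patch into the applied a.patch joins their headers with the
# b'* * *' separator appended above, producing a message such as
#
#   message of a.patch
#   * * *
#   message of b.patch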


@command(
    b"qgoto",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'overwrite any local changes')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qgoto [OPTION]... PATCH'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def goto(ui, repo, patch, **opts):
    """push or pop patches until named patch is at top of stack

    Returns 0 on success."""
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    q = repo.mq
    patch = q.lookup(patch)
    nobackup = opts.get(b'no_backup')
    keepchanges = opts.get(b'keep_changes')
    if q.isapplied(patch):
        ret = q.pop(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    else:
        ret = q.push(
            repo,
            patch,
            force=opts.get(b'force'),
            nobackup=nobackup,
            keepchanges=keepchanges,
        )
    q.savedirty()
    return ret


@command(
    b"qguard",
    [
        (b'l', b'list', None, _(b'list all patches and guards')),
        (b'n', b'none', None, _(b'drop all guards')),
    ],
    _(b'hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def guard(ui, repo, *args, **opts):
    """set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::

       Specifying negative guards now requires '--'.

    To set guards on another patch::

      hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    """

    def status(idx):
        guards = q.seriesguards[idx] or [b'unguarded']
        if q.series[idx] in applied:
            state = b'applied'
        elif q.pushable(idx)[0]:
            state = b'unapplied'
        else:
            state = b'guarded'
        label = b'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write(b'%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith(b'+'):
                ui.write(guard, label=b'qguard.positive')
            elif guard.startswith(b'-'):
                ui.write(guard, label=b'qguard.negative')
            else:
                ui.write(guard, label=b'qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(b' ')
        ui.write(b'\n')

    q = repo.mq
    applied = {p.name for p in q.applied}
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise error.Abort(
                _(b'cannot mix -l/--list with options or arguments')
            )
        for i in range(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in b'-+':
        if not q.applied:
            raise error.Abort(_(b'no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in b'-+':
        patch = args.pop(0)
    if patch is None:
        raise error.Abort(_(b'no patch to work with'))
    if args or opts.get('none'):
        idx = q.findseries(patch)
        if idx is None:
            raise error.Abort(_(b'no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))


@command(
    b"qheader",
    [],
    _(b'hg qheader [PATCH]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return 1
        patch = q.lookup(b'qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write(b'\n'.join(ph.message) + b'\n')


def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile(b"%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)


def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + b".%d" % (index + 1)
    return newpath
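
# Worked example (paths are hypothetical): with saved queues
# .hg/patches.1 and .hg/patches.2 on disk,
#
#   lastsavename(b'.hg/patches')  ->  (b'.hg/patches.2', 2)
#   savename(b'.hg/patches')      ->  b'.hg/patches.3'
#
# With no saved copies, lastsavename returns (None, None) and savename
# falls back to b'.hg/patches.1'.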


@command(
    b"qpush",
    [
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'apply on top of local changes')),
        (
            b'e',
            b'exact',
            None,
            _(b'apply the target patch to its recorded parent'),
        ),
        (b'l', b'list', None, _(b'list patch name in commit text')),
        (b'a', b'all', None, _(b'apply all patches')),
        (b'm', b'merge', None, _(b'merge from another queue (DEPRECATED)')),
        (b'n', b'name', b'', _(b'merge queue name (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'move',
            None,
            _(b'reorder patch series and apply only the patch'),
        ),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and patch over
    uncommitted changes.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    if opts.get(b'merge'):
        if opts.get(b'name'):
            newpath = repo.vfs.join(opts.get(b'name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_(b"no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.baseui, repo.path, newpath)
        ui.warn(_(b"merging with queue at: %s\n") % mergeq.path)
    ret = q.push(
        repo,
        patch,
        force=opts.get(b'force'),
        list=opts.get(b'list'),
        mergeq=mergeq,
        all=opts.get(b'all'),
        move=opts.get(b'move'),
        exact=opts.get(b'exact'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    return ret


@command(
    b"qpop",
    [
        (b'a', b'all', None, _(b'pop all patches')),
        (b'n', b'name', b'', _(b'queue name to pop (DEPRECATED)'), _(b'NAME')),
        (
            b'',
            b'keep-changes',
            None,
            _(b'tolerate non-conflicting local changes'),
        ),
        (b'f', b'force', None, _(b'forget any local changes to patched files')),
        (b'', b'no-backup', None, _(b'do not save backup copies of files')),
    ],
    _(b'hg qpop [-a] [-f] [PATCH | INDEX]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
    helpbasic=True,
)
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    Without argument, pops off the top of the patch stack. If given a
    patch name, keeps popping off patches until the named patch is at
    the top of the stack.

    By default, abort if the working directory contains uncommitted
    changes. With --keep-changes, abort only if the uncommitted files
    overlap with patched files. With -f/--force, backup and discard
    changes made to such files.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    opts = fixkeepchangesopts(ui, opts)
    localupdate = True
    if opts.get(b'name'):
        q = queue(ui, repo.baseui, repo.path, repo.vfs.join(opts.get(b'name')))
        ui.warn(_(b'using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(
        repo,
        patch,
        force=opts.get(b'force'),
        update=localupdate,
        all=opts.get(b'all'),
        nobackup=opts.get(b'no_backup'),
        keepchanges=opts.get(b'keep_changes'),
    )
    q.savedirty()
    return ret
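
# Illustrative push/pop session (the patch name is hypothetical):
#
#   $ hg qpush               # apply the next patch in the series
#   $ hg qpush -a            # apply every remaining patch
#   $ hg qpop                # unapply the topmost patch
#   $ hg qpop widget.patch   # pop until widget.patch is at the top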


@command(
    b"qrename|qmv",
    [],
    _(b'hg qrename PATCH1 [PATCH2]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""
    q = repo.mq
    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_(b'no patches applied\n'))
            return
        patch = q.lookup(b'qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_(b'renaming %s to %s\n') % (patch, name))
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + b''.join([b' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        with r.wlock(), r.dirstate.changing_files(r):
            wctx = r[None]
            if r.dirstate.get_entry(patch).added:
                r.dirstate.set_untracked(patch)
                r.dirstate.set_tracked(name)
            else:
                wctx.copy(patch, name)
                wctx.forget([patch])

    q.savedirty()


@command(
    b"qrestore",
    [
        (b'd', b'delete', None, _(b'delete save entry')),
        (b'u', b'update', None, _(b'update queue working directory')),
    ],
    _(b'hg qrestore [-d] [-u] REV'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts.get('delete'), qupdate=opts.get('update'))
    q.savedirty()
    return 0


@command(
    b"qsave",
    [
        (b'c', b'copy', None, _(b'copy patch directory')),
        (b'n', b'name', b'', _(b'copy directory name'), _(b'NAME')),
        (b'e', b'empty', None, _(b'clear queue status file')),
        (b'f', b'force', None, _(b'force copy')),
    ]
    + cmdutil.commitopts,
    _(b'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty()  # save to .hg/patches before copying
    if opts.get(b'copy'):
        path = q.path
        if opts.get(b'name'):
            newpath = os.path.join(q.basepath, opts.get(b'name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise error.Abort(
                        _(b'destination %s exists and is not a directory')
                        % newpath
                    )
                if not opts.get(b'force'):
                    raise error.Abort(
                        _(b'destination %s exists, use -f to force') % newpath
                    )
        else:
            newpath = savename(path)
        ui.warn(_(b"copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get(b'empty'):
        del q.applied[:]
        q.applieddirty = True
        q.savedirty()
    return 0


@command(
    b"qselect",
    [
        (b'n', b'none', None, _(b'disable all guards')),
        (b's', b'series', None, _(b'list all guards in series file')),
        (b'', b'pop', None, _(b'pop to before first guarded applied patch')),
        (b'', b'reapply', None, _(b'pop, then reapply patches')),
    ],
    _(b'hg qselect [OPTION]... [GUARD]...'),
    helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
)
def select(ui, repo, *args, **opts):
    """set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on a patch, then
    use qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

      qguard foo.patch -- -stable (negative guard)
      qguard bar.patch +stable (positive guard)
      qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success."""

    q = repo.mq
    opts = pycompat.byteskwargs(opts)
    guards = q.active()
    pushable = lambda i: q.pushable(q.applied[i].name)[0]
    if args or opts.get(b'none'):
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in range(len(q.applied)) if not pushable(i)]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_(b'guards deactivated\n'))
        if not opts.get(b'pop') and not opts.get(b'reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in range(len(q.applied)) if not pushable(i)]
            if len(unapplied) != len(old_unapplied):
                ui.status(
                    _(
                        b'number of unguarded, unapplied patches has '
                        b'changed from %d to %d\n'
                    )
                    % (len(old_unapplied), len(unapplied))
                )
            if len(guarded) != len(old_guarded):
                ui.status(
                    _(
                        b'number of guarded, applied patches has changed '
                        b'from %d to %d\n'
                    )
                    % (len(old_guarded), len(guarded))
                )
    elif opts.get(b'series'):
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards[b'NONE'] = noguards
        guards = list(guards.items())
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_(b'guards in series file:\n'))
            for guard, count in guards:
                ui.note(b'%2d ' % count)
                ui.write(guard, b'\n')
        else:
            ui.note(_(b'no guards in series file\n'))
    else:
        if guards:
            ui.note(_(b'active guards:\n'))
            for g in guards:
                ui.write(g, b'\n')
        else:
            ui.write(_(b'no active guards\n'))
    reapply = opts.get(b'reapply') and q.applied and q.applied[-1].name
    popped = False
    if opts.get(b'pop') or opts.get(b'reapply'):
        for i in range(len(q.applied)):
            if not pushable(i):
                ui.status(_(b'popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, q.applied[i - 1].name)
                break
    if popped:
        try:
            if reapply:
                ui.status(_(b'reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()
3833
3831
3834
3832
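The guard workflow documented in qselect's help above, as a short session sketch (the patch and guard names are illustrative):

    $ hg qguard foo.patch -- -stable   # skip foo.patch while "stable" is active
    $ hg qguard bar.patch +stable      # push bar.patch only while "stable" is active
    $ hg qselect stable                # activate the "stable" guard
    $ hg qpush --all                   # applies bar.patch, skips over foo.patch
    $ hg qselect --none                # deactivate all guards again
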
3835 @command(
3833 @command(
3836 b"qfinish",
3834 b"qfinish",
3837 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3835 [(b'a', b'applied', None, _(b'finish all applied changesets'))],
3838 _(b'hg qfinish [-a] [REV]...'),
3836 _(b'hg qfinish [-a] [REV]...'),
3839 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3837 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3840 )
3838 )
3841 def finish(ui, repo, *revrange, **opts):
3839 def finish(ui, repo, *revrange, **opts):
3842 """move applied patches into repository history
3840 """move applied patches into repository history
3843
3841
3844 Finishes the specified revisions (corresponding to applied
3842 Finishes the specified revisions (corresponding to applied
3845 patches) by moving them out of mq control into regular repository
3843 patches) by moving them out of mq control into regular repository
3846 history.
3844 history.
3847
3845
3848 Accepts a revision range or the -a/--applied option. If --applied
3846 Accepts a revision range or the -a/--applied option. If --applied
3849 is specified, all applied mq revisions are removed from mq
3847 is specified, all applied mq revisions are removed from mq
3850 control. Otherwise, the given revisions must be at the base of the
3848 control. Otherwise, the given revisions must be at the base of the
3851 stack of applied patches.
3849 stack of applied patches.
3852
3850
3853 This can be especially useful if your changes have been applied to
3851 This can be especially useful if your changes have been applied to
3854 an upstream repository, or if you are about to push your changes
3852 an upstream repository, or if you are about to push your changes
3855 upstream.
3853 upstream.
3856
3854
3857 Returns 0 on success.
3855 Returns 0 on success.
3858 """
3856 """
3859 if not opts.get('applied') and not revrange:
3857 if not opts.get('applied') and not revrange:
3860 raise error.Abort(_(b'no revisions specified'))
3858 raise error.Abort(_(b'no revisions specified'))
3861 elif opts.get('applied'):
3859 elif opts.get('applied'):
3862 revrange = (b'qbase::qtip',) + revrange
3860 revrange = (b'qbase::qtip',) + revrange
3863
3861
3864 q = repo.mq
3862 q = repo.mq
3865 if not q.applied:
3863 if not q.applied:
3866 ui.status(_(b'no patches applied\n'))
3864 ui.status(_(b'no patches applied\n'))
3867 return 0
3865 return 0
3868
3866
3869 revs = logcmdutil.revrange(repo, revrange)
3867 revs = logcmdutil.revrange(repo, revrange)
3870 if repo[b'.'].rev() in revs and repo[None].files():
3868 if repo[b'.'].rev() in revs and repo[None].files():
3871 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3869 ui.warn(_(b'warning: uncommitted changes in the working directory\n'))
3872 # queue.finish may change phases but leaves the responsibility to lock the
3870 # queue.finish may change phases but leaves the responsibility to lock the
3873 # repo to the caller to avoid deadlock with wlock. This command code is
3871 # repo to the caller to avoid deadlock with wlock. This command code is
3874 # responsible for this locking.
3872 # responsible for this locking.
3875 with repo.lock():
3873 with repo.lock():
3876 q.finish(repo, revs)
3874 q.finish(repo, revs)
3877 q.savedirty()
3875 q.savedirty()
3878 return 0
3876 return 0
3879
3877
3880
3878
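Typical qfinish invocations once changes have been accepted upstream (a usage sketch; the revision argument is illustrative):

    $ hg qfinish --applied   # move every applied mq patch into regular history
    $ hg qfinish qbase       # finish only the patch at the base of the stack
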
3881 @command(
3879 @command(
3882 b"qqueue",
3880 b"qqueue",
3883 [
3881 [
3884 (b'l', b'list', False, _(b'list all available queues')),
3882 (b'l', b'list', False, _(b'list all available queues')),
3885 (b'', b'active', False, _(b'print name of active queue')),
3883 (b'', b'active', False, _(b'print name of active queue')),
3886 (b'c', b'create', False, _(b'create new queue')),
3884 (b'c', b'create', False, _(b'create new queue')),
3887 (b'', b'rename', False, _(b'rename active queue')),
3885 (b'', b'rename', False, _(b'rename active queue')),
3888 (b'', b'delete', False, _(b'delete reference to queue')),
3886 (b'', b'delete', False, _(b'delete reference to queue')),
3889 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3887 (b'', b'purge', False, _(b'delete queue, and remove patch dir')),
3890 ],
3888 ],
3891 _(b'[OPTION] [QUEUE]'),
3889 _(b'[OPTION] [QUEUE]'),
3892 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3890 helpcategory=command.CATEGORY_CHANGE_ORGANIZATION,
3893 )
3891 )
3894 def qqueue(ui, repo, name=None, **opts):
3892 def qqueue(ui, repo, name=None, **opts):
3895 """manage multiple patch queues
3893 """manage multiple patch queues
3896
3894
3897 Supports switching between different patch queues, as well as creating
3895 Supports switching between different patch queues, as well as creating
3898 new patch queues and deleting existing ones.
3896 new patch queues and deleting existing ones.
3899
3897
3900 Omitting a queue name or specifying -l/--list will show you the registered
3898 Omitting a queue name or specifying -l/--list will show you the registered
3901 queues - by default the "normal" patches queue is registered. The currently
3899 queues - by default the "normal" patches queue is registered. The currently
3902 active queue will be marked with "(active)". Specifying --active will print
3900 active queue will be marked with "(active)". Specifying --active will print
3903 only the name of the active queue.
3901 only the name of the active queue.
3904
3902
3905 To create a new queue, use -c/--create. The queue is automatically made
3903 To create a new queue, use -c/--create. The queue is automatically made
3906 active, except in the case where there are applied patches from the
3904 active, except in the case where there are applied patches from the
3907 currently active queue in the repository. In that case, the queue will only
3905 currently active queue in the repository. In that case, the queue will only
3908 be created, and switching to it will fail.
3906 be created, and switching to it will fail.
3909
3907
3910 To delete an existing queue, use --delete. You cannot delete the currently
3908 To delete an existing queue, use --delete. You cannot delete the currently
3911 active queue.
3909 active queue.
3912
3910
3913 Returns 0 on success.
3911 Returns 0 on success.
3914 """
3912 """
3915 q = repo.mq
3913 q = repo.mq
3916 _defaultqueue = b'patches'
3914 _defaultqueue = b'patches'
3917 _allqueues = b'patches.queues'
3915 _allqueues = b'patches.queues'
3918 _activequeue = b'patches.queue'
3916 _activequeue = b'patches.queue'
3919
3917
3920 def _getcurrent():
3918 def _getcurrent():
3921 cur = os.path.basename(q.path)
3919 cur = os.path.basename(q.path)
3922 if cur.startswith(b'patches-'):
3920 if cur.startswith(b'patches-'):
3923 cur = cur[8:]
3921 cur = cur[8:]
3924 return cur
3922 return cur
3925
3923
3926 def _noqueues():
3924 def _noqueues():
3927 try:
3925 try:
3928 fh = repo.vfs(_allqueues, b'r')
3926 fh = repo.vfs(_allqueues, b'r')
3929 fh.close()
3927 fh.close()
3930 except IOError:
3928 except IOError:
3931 return True
3929 return True
3932
3930
3933 return False
3931 return False
3934
3932
3935 def _getqueues():
3933 def _getqueues():
3936 current = _getcurrent()
3934 current = _getcurrent()
3937
3935
3938 try:
3936 try:
3939 fh = repo.vfs(_allqueues, b'r')
3937 fh = repo.vfs(_allqueues, b'r')
3940 queues = [queue.strip() for queue in fh if queue.strip()]
3938 queues = [queue.strip() for queue in fh if queue.strip()]
3941 fh.close()
3939 fh.close()
3942 if current not in queues:
3940 if current not in queues:
3943 queues.append(current)
3941 queues.append(current)
3944 except IOError:
3942 except IOError:
3945 queues = [_defaultqueue]
3943 queues = [_defaultqueue]
3946
3944
3947 return sorted(queues)
3945 return sorted(queues)
3948
3946
3949 def _setactive(name):
3947 def _setactive(name):
3950 if q.applied:
3948 if q.applied:
3951 raise error.Abort(
3949 raise error.Abort(
3952 _(
3950 _(
3953 b'new queue created, but cannot make active '
3951 b'new queue created, but cannot make active '
3954 b'as patches are applied'
3952 b'as patches are applied'
3955 )
3953 )
3956 )
3954 )
3957 _setactivenocheck(name)
3955 _setactivenocheck(name)
3958
3956
3959 def _setactivenocheck(name):
3957 def _setactivenocheck(name):
3960 fh = repo.vfs(_activequeue, b'w')
3958 fh = repo.vfs(_activequeue, b'w')
3961 if name != b'patches':
3959 if name != b'patches':
3962 fh.write(name)
3960 fh.write(name)
3963 fh.close()
3961 fh.close()
3964
3962
3965 def _addqueue(name):
3963 def _addqueue(name):
3966 fh = repo.vfs(_allqueues, b'a')
3964 fh = repo.vfs(_allqueues, b'a')
3967 fh.write(b'%s\n' % (name,))
3965 fh.write(b'%s\n' % (name,))
3968 fh.close()
3966 fh.close()
3969
3967
3970 def _queuedir(name):
3968 def _queuedir(name):
3971 if name == b'patches':
3969 if name == b'patches':
3972 return repo.vfs.join(b'patches')
3970 return repo.vfs.join(b'patches')
3973 else:
3971 else:
3974 return repo.vfs.join(b'patches-' + name)
3972 return repo.vfs.join(b'patches-' + name)
3975
3973
3976 def _validname(name):
3974 def _validname(name):
3977 for n in name:
3975 for n in name:
3978 if n in b':\\/.':
3976 if n in b':\\/.':
3979 return False
3977 return False
3980 return True
3978 return True
3981
3979
3982 def _delete(name):
3980 def _delete(name):
3983 if name not in existing:
3981 if name not in existing:
3984 raise error.Abort(_(b'cannot delete queue that does not exist'))
3982 raise error.Abort(_(b'cannot delete queue that does not exist'))
3985
3983
3986 current = _getcurrent()
3984 current = _getcurrent()
3987
3985
3988 if name == current:
3986 if name == current:
3989 raise error.Abort(_(b'cannot delete currently active queue'))
3987 raise error.Abort(_(b'cannot delete currently active queue'))
3990
3988
3991 fh = repo.vfs(b'patches.queues.new', b'w')
3989 fh = repo.vfs(b'patches.queues.new', b'w')
3992 for queue in existing:
3990 for queue in existing:
3993 if queue == name:
3991 if queue == name:
3994 continue
3992 continue
3995 fh.write(b'%s\n' % (queue,))
3993 fh.write(b'%s\n' % (queue,))
3996 fh.close()
3994 fh.close()
3997 repo.vfs.rename(b'patches.queues.new', _allqueues)
3995 repo.vfs.rename(b'patches.queues.new', _allqueues)
3998
3996
3999 opts = pycompat.byteskwargs(opts)
3997 opts = pycompat.byteskwargs(opts)
4000 if not name or opts.get(b'list') or opts.get(b'active'):
3998 if not name or opts.get(b'list') or opts.get(b'active'):
4001 current = _getcurrent()
3999 current = _getcurrent()
4002 if opts.get(b'active'):
4000 if opts.get(b'active'):
4003 ui.write(b'%s\n' % (current,))
4001 ui.write(b'%s\n' % (current,))
4004 return
4002 return
4005 for queue in _getqueues():
4003 for queue in _getqueues():
4006 ui.write(b'%s' % (queue,))
4004 ui.write(b'%s' % (queue,))
4007 if queue == current and not ui.quiet:
4005 if queue == current and not ui.quiet:
4008 ui.write(_(b' (active)\n'))
4006 ui.write(_(b' (active)\n'))
4009 else:
4007 else:
4010 ui.write(b'\n')
4008 ui.write(b'\n')
4011 return
4009 return
4012
4010
4013 if not _validname(name):
4011 if not _validname(name):
4014 raise error.Abort(
4012 raise error.Abort(
4015 _(b'invalid queue name, may not contain the characters ":\\/."')
4013 _(b'invalid queue name, may not contain the characters ":\\/."')
4016 )
4014 )
4017
4015
4018 with repo.wlock():
4016 with repo.wlock():
4019 existing = _getqueues()
4017 existing = _getqueues()
4020
4018
4021 if opts.get(b'create'):
4019 if opts.get(b'create'):
4022 if name in existing:
4020 if name in existing:
4023 raise error.Abort(_(b'queue "%s" already exists') % name)
4021 raise error.Abort(_(b'queue "%s" already exists') % name)
4024 if _noqueues():
4022 if _noqueues():
4025 _addqueue(_defaultqueue)
4023 _addqueue(_defaultqueue)
4026 _addqueue(name)
4024 _addqueue(name)
4027 _setactive(name)
4025 _setactive(name)
4028 elif opts.get(b'rename'):
4026 elif opts.get(b'rename'):
4029 current = _getcurrent()
4027 current = _getcurrent()
4030 if name == current:
4028 if name == current:
4031 raise error.Abort(
4029 raise error.Abort(
4032 _(b'can\'t rename "%s" to its current name') % name
4030 _(b'can\'t rename "%s" to its current name') % name
4033 )
4031 )
4034 if name in existing:
4032 if name in existing:
4035 raise error.Abort(_(b'queue "%s" already exists') % name)
4033 raise error.Abort(_(b'queue "%s" already exists') % name)
4036
4034
4037 olddir = _queuedir(current)
4035 olddir = _queuedir(current)
4038 newdir = _queuedir(name)
4036 newdir = _queuedir(name)
4039
4037
4040 if os.path.exists(newdir):
4038 if os.path.exists(newdir):
4041 raise error.Abort(
4039 raise error.Abort(
4042 _(b'non-queue directory "%s" already exists') % newdir
4040 _(b'non-queue directory "%s" already exists') % newdir
4043 )
4041 )
4044
4042
4045 fh = repo.vfs(b'patches.queues.new', b'w')
4043 fh = repo.vfs(b'patches.queues.new', b'w')
4046 for queue in existing:
4044 for queue in existing:
4047 if queue == current:
4045 if queue == current:
4048 fh.write(b'%s\n' % (name,))
4046 fh.write(b'%s\n' % (name,))
4049 if os.path.exists(olddir):
4047 if os.path.exists(olddir):
4050 util.rename(olddir, newdir)
4048 util.rename(olddir, newdir)
4051 else:
4049 else:
4052 fh.write(b'%s\n' % (queue,))
4050 fh.write(b'%s\n' % (queue,))
4053 fh.close()
4051 fh.close()
4054 repo.vfs.rename(b'patches.queues.new', _allqueues)
4052 repo.vfs.rename(b'patches.queues.new', _allqueues)
4055 _setactivenocheck(name)
4053 _setactivenocheck(name)
4056 elif opts.get(b'delete'):
4054 elif opts.get(b'delete'):
4057 _delete(name)
4055 _delete(name)
4058 elif opts.get(b'purge'):
4056 elif opts.get(b'purge'):
4059 if name in existing:
4057 if name in existing:
4060 _delete(name)
4058 _delete(name)
4061 qdir = _queuedir(name)
4059 qdir = _queuedir(name)
4062 if os.path.exists(qdir):
4060 if os.path.exists(qdir):
4063 shutil.rmtree(qdir)
4061 shutil.rmtree(qdir)
4064 else:
4062 else:
4065 if name not in existing:
4063 if name not in existing:
4066 raise error.Abort(_(b'use --create to create a new queue'))
4064 raise error.Abort(_(b'use --create to create a new queue'))
4067 _setactive(name)
4065 _setactive(name)
4068
4066
4069
4067
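The queue-management operations above, as a short session sketch (the queue name "featureX" is illustrative):

    $ hg qqueue --create featureX   # register a new queue and make it active
    $ hg qqueue --list              # list queues; the active one is marked "(active)"
    $ hg qqueue patches             # switch back to the default queue
    $ hg qqueue --purge featureX    # delete the queue and remove its patch directory
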
4070 def mqphasedefaults(repo, roots):
4068 def mqphasedefaults(repo, roots):
4071 """callback used to set mq changeset as secret when no phase data exists"""
4069 """callback used to set mq changeset as secret when no phase data exists"""
4072 if repo.mq.applied:
4070 if repo.mq.applied:
4073 if repo.ui.configbool(b'mq', b'secret'):
4071 if repo.ui.configbool(b'mq', b'secret'):
4074 mqphase = phases.secret
4072 mqphase = phases.secret
4075 else:
4073 else:
4076 mqphase = phases.draft
4074 mqphase = phases.draft
4077 qbase = repo[repo.mq.applied[0].node]
4075 qbase = repo[repo.mq.applied[0].node]
4078 roots[mqphase].add(qbase.node())
4076 roots[mqphase].add(qbase.node())
4079 return roots
4077 return roots
4080
4078
4081
4079
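mqphasedefaults keys off the mq.secret knob it reads above; a minimal hgrc sketch that makes mq-managed changesets start out secret:

    [mq]
    # changesets under mq control get the secret phase by default
    secret = True
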
4082 def reposetup(ui, repo):
4080 def reposetup(ui, repo):
4083 class mqrepo(repo.__class__):
4081 class mqrepo(repo.__class__):
4084 @localrepo.unfilteredpropertycache
4082 @localrepo.unfilteredpropertycache
4085 def mq(self):
4083 def mq(self):
4086 return queue(self.ui, self.baseui, self.path)
4084 return queue(self.ui, self.baseui, self.path)
4087
4085
4088 def invalidateall(self):
4086 def invalidateall(self):
4089 super(mqrepo, self).invalidateall()
4087 super(mqrepo, self).invalidateall()
4090 if localrepo.hasunfilteredcache(self, 'mq'):
4088 if localrepo.hasunfilteredcache(self, 'mq'):
4091 # recreate mq in case queue path was changed
4089 # recreate mq in case queue path was changed
4092 delattr(self.unfiltered(), 'mq')
4090 delattr(self.unfiltered(), 'mq')
4093
4091
4094 def abortifwdirpatched(self, errmsg, force=False):
4092 def abortifwdirpatched(self, errmsg, force=False):
4095 if self.mq.applied and self.mq.checkapplied and not force:
4093 if self.mq.applied and self.mq.checkapplied and not force:
4096 parents = self.dirstate.parents()
4094 parents = self.dirstate.parents()
4097 patches = [s.node for s in self.mq.applied]
4095 patches = [s.node for s in self.mq.applied]
4098 if any(p in patches for p in parents):
4096 if any(p in patches for p in parents):
4099 raise error.Abort(errmsg)
4097 raise error.Abort(errmsg)
4100
4098
4101 def commit(
4099 def commit(
4102 self,
4100 self,
4103 text=b"",
4101 text=b"",
4104 user=None,
4102 user=None,
4105 date=None,
4103 date=None,
4106 match=None,
4104 match=None,
4107 force=False,
4105 force=False,
4108 editor=False,
4106 editor=False,
4109 extra=None,
4107 extra=None,
4110 ):
4108 ):
4111 if extra is None:
4109 if extra is None:
4112 extra = {}
4110 extra = {}
4113 self.abortifwdirpatched(
4111 self.abortifwdirpatched(
4114 _(b'cannot commit over an applied mq patch'), force
4112 _(b'cannot commit over an applied mq patch'), force
4115 )
4113 )
4116
4114
4117 return super(mqrepo, self).commit(
4115 return super(mqrepo, self).commit(
4118 text, user, date, match, force, editor, extra
4116 text, user, date, match, force, editor, extra
4119 )
4117 )
4120
4118
4121 def checkpush(self, pushop):
4119 def checkpush(self, pushop):
4122 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4120 if self.mq.applied and self.mq.checkapplied and not pushop.force:
4123 outapplied = [e.node for e in self.mq.applied]
4121 outapplied = [e.node for e in self.mq.applied]
4124 if pushop.revs:
4122 if pushop.revs:
4125 # Assume applied patches have no non-patch descendants and
4123 # Assume applied patches have no non-patch descendants and
4126 # are not already on the remote. Filter out any changeset not
4124 # are not already on the remote. Filter out any changeset not
4127 # being pushed.
4125 # being pushed.
4128 heads = set(pushop.revs)
4126 heads = set(pushop.revs)
4129 for node in reversed(outapplied):
4127 for node in reversed(outapplied):
4130 if node in heads:
4128 if node in heads:
4131 break
4129 break
4132 else:
4130 else:
4133 outapplied.pop()
4131 outapplied.pop()
4134 # look for pushed and shared changesets
4132 # look for pushed and shared changesets
4135 for node in outapplied:
4133 for node in outapplied:
4136 if self[node].phase() < phases.secret:
4134 if self[node].phase() < phases.secret:
4137 raise error.Abort(_(b'source has mq patches applied'))
4135 raise error.Abort(_(b'source has mq patches applied'))
4138 # no non-secret patches pushed
4136 # no non-secret patches pushed
4139 super(mqrepo, self).checkpush(pushop)
4137 super(mqrepo, self).checkpush(pushop)
4140
4138
4141 def _findtags(self):
4139 def _findtags(self):
4142 '''augment tags from base class with patch tags'''
4140 '''augment tags from base class with patch tags'''
4143 result = super(mqrepo, self)._findtags()
4141 result = super(mqrepo, self)._findtags()
4144
4142
4145 q = self.mq
4143 q = self.mq
4146 if not q.applied:
4144 if not q.applied:
4147 return result
4145 return result
4148
4146
4149 mqtags = [(patch.node, patch.name) for patch in q.applied]
4147 mqtags = [(patch.node, patch.name) for patch in q.applied]
4150
4148
4151 try:
4149 try:
4152 # for now ignore filtering business
4150 # for now ignore filtering business
4153 self.unfiltered().changelog.rev(mqtags[-1][0])
4151 self.unfiltered().changelog.rev(mqtags[-1][0])
4154 except error.LookupError:
4152 except error.LookupError:
4155 self.ui.warn(
4153 self.ui.warn(
4156 _(b'mq status file refers to unknown node %s\n')
4154 _(b'mq status file refers to unknown node %s\n')
4157 % short(mqtags[-1][0])
4155 % short(mqtags[-1][0])
4158 )
4156 )
4159 return result
4157 return result
4160
4158
4161 # do not add fake tags for filtered revisions
4159 # do not add fake tags for filtered revisions
4162 included = self.changelog.hasnode
4160 included = self.changelog.hasnode
4163 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4161 mqtags = [mqt for mqt in mqtags if included(mqt[0])]
4164 if not mqtags:
4162 if not mqtags:
4165 return result
4163 return result
4166
4164
4167 mqtags.append((mqtags[-1][0], b'qtip'))
4165 mqtags.append((mqtags[-1][0], b'qtip'))
4168 mqtags.append((mqtags[0][0], b'qbase'))
4166 mqtags.append((mqtags[0][0], b'qbase'))
4169 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4167 mqtags.append((self.changelog.parents(mqtags[0][0])[0], b'qparent'))
4170 tags = result[0]
4168 tags = result[0]
4171 for patch in mqtags:
4169 for patch in mqtags:
4172 if patch[1] in tags:
4170 if patch[1] in tags:
4173 self.ui.warn(
4171 self.ui.warn(
4174 _(b'tag %s overrides mq patch of the same name\n')
4172 _(b'tag %s overrides mq patch of the same name\n')
4175 % patch[1]
4173 % patch[1]
4176 )
4174 )
4177 else:
4175 else:
4178 tags[patch[1]] = patch[0]
4176 tags[patch[1]] = patch[0]
4179
4177
4180 return result
4178 return result
4181
4179
4182 if repo.local():
4180 if repo.local():
4183 repo.__class__ = mqrepo
4181 repo.__class__ = mqrepo
4184
4182
4185 repo._phasedefaults.append(mqphasedefaults)
4183 repo._phasedefaults.append(mqphasedefaults)
4186
4184
4187
4185
4188 def mqimport(orig, ui, repo, *args, **kwargs):
4186 def mqimport(orig, ui, repo, *args, **kwargs):
4189 if hasattr(repo, 'abortifwdirpatched') and not kwargs.get(
4187 if hasattr(repo, 'abortifwdirpatched') and not kwargs.get(
4190 'no_commit', False
4188 'no_commit', False
4191 ):
4189 ):
4192 repo.abortifwdirpatched(
4190 repo.abortifwdirpatched(
4193 _(b'cannot import over an applied patch'), kwargs.get('force')
4191 _(b'cannot import over an applied patch'), kwargs.get('force')
4194 )
4192 )
4195 return orig(ui, repo, *args, **kwargs)
4193 return orig(ui, repo, *args, **kwargs)
4196
4194
4197
4195
4198 def mqinit(orig, ui, *args, **kwargs):
4196 def mqinit(orig, ui, *args, **kwargs):
4199 mq = kwargs.pop('mq', None)
4197 mq = kwargs.pop('mq', None)
4200
4198
4201 if not mq:
4199 if not mq:
4202 return orig(ui, *args, **kwargs)
4200 return orig(ui, *args, **kwargs)
4203
4201
4204 if args:
4202 if args:
4205 repopath = args[0]
4203 repopath = args[0]
4206 if not hg.islocal(repopath):
4204 if not hg.islocal(repopath):
4207 raise error.Abort(
4205 raise error.Abort(
4208 _(b'only a local queue repository may be initialized')
4206 _(b'only a local queue repository may be initialized')
4209 )
4207 )
4210 else:
4208 else:
4211 repopath = cmdutil.findrepo(encoding.getcwd())
4209 repopath = cmdutil.findrepo(encoding.getcwd())
4212 if not repopath:
4210 if not repopath:
4213 raise error.Abort(
4211 raise error.Abort(
4214 _(b'there is no Mercurial repository here (.hg not found)')
4212 _(b'there is no Mercurial repository here (.hg not found)')
4215 )
4213 )
4216 repo = hg.repository(ui, repopath)
4214 repo = hg.repository(ui, repopath)
4217 return qinit(ui, repo, True)
4215 return qinit(ui, repo, True)
4218
4216
4219
4217
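mqinit above implements the --mq flag that extsetup() below attaches to hg init (mqcommand generalizes the flag to other commands); a usage sketch:

    $ hg init --mq              # create a repository versioning the active queue's patches
    $ hg commit --mq -m 'msg'   # run a command against that patch repository instead
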
4220 def mqcommand(orig, ui, repo, *args, **kwargs):
4218 def mqcommand(orig, ui, repo, *args, **kwargs):
4221 """Add --mq option to operate on patch repository instead of main"""
4219 """Add --mq option to operate on patch repository instead of main"""
4222
4220
4223 # some commands do not like getting unknown options
4221 # some commands do not like getting unknown options
4224 mq = kwargs.pop('mq', None)
4222 mq = kwargs.pop('mq', None)
4225
4223
4226 if not mq:
4224 if not mq:
4227 return orig(ui, repo, *args, **kwargs)
4225 return orig(ui, repo, *args, **kwargs)
4228
4226
4229 q = repo.mq
4227 q = repo.mq
4230 r = q.qrepo()
4228 r = q.qrepo()
4231 if not r:
4229 if not r:
4232 raise error.Abort(_(b'no queue repository'))
4230 raise error.Abort(_(b'no queue repository'))
4233 return orig(r.ui, r, *args, **kwargs)
4231 return orig(r.ui, r, *args, **kwargs)
4234
4232
4235
4233
4236 def summaryhook(ui, repo):
4234 def summaryhook(ui, repo):
4237 q = repo.mq
4235 q = repo.mq
4238 m = []
4236 m = []
4239 a, u = len(q.applied), len(q.unapplied(repo))
4237 a, u = len(q.applied), len(q.unapplied(repo))
4240 if a:
4238 if a:
4241 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4239 m.append(ui.label(_(b"%d applied"), b'qseries.applied') % a)
4242 if u:
4240 if u:
4243 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4241 m.append(ui.label(_(b"%d unapplied"), b'qseries.unapplied') % u)
4244 if m:
4242 if m:
4245 # i18n: column positioning for "hg summary"
4243 # i18n: column positioning for "hg summary"
4246 ui.write(_(b"mq: %s\n") % b', '.join(m))
4244 ui.write(_(b"mq: %s\n") % b', '.join(m))
4247 else:
4245 else:
4248 # i18n: column positioning for "hg summary"
4246 # i18n: column positioning for "hg summary"
4249 ui.note(_(b"mq: (empty queue)\n"))
4247 ui.note(_(b"mq: (empty queue)\n"))
4250
4248
4251
4249
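The summary hook above contributes one line to hg summary output, built from the applied/unapplied counts; roughly (the counts are illustrative):

    $ hg summary
    ...
    mq: 2 applied, 1 unapplied
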
4252 revsetpredicate = registrar.revsetpredicate()
4250 revsetpredicate = registrar.revsetpredicate()
4253
4251
4254
4252
4255 @revsetpredicate(b'mq()')
4253 @revsetpredicate(b'mq()')
4256 def revsetmq(repo, subset, x):
4254 def revsetmq(repo, subset, x):
4257 """Changesets managed by MQ."""
4255 """Changesets managed by MQ."""
4258 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4256 revsetlang.getargs(x, 0, 0, _(b"mq takes no arguments"))
4259 applied = {repo[r.node].rev() for r in repo.mq.applied}
4257 applied = {repo[r.node].rev() for r in repo.mq.applied}
4260 return smartset.baseset([r for r in subset if r in applied])
4258 return smartset.baseset([r for r in subset if r in applied])
4261
4259
4262
4260
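The mq() revset registered above composes with any other revset expression; a usage sketch:

    $ hg log -r 'mq()'                   # only changesets under mq control
    $ hg log -r 'draft() and not mq()'   # drafts that are not mq patches
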
4263 # tell hggettext to extract docstrings from these functions:
4261 # tell hggettext to extract docstrings from these functions:
4264 i18nfunctions = [revsetmq]
4262 i18nfunctions = [revsetmq]
4265
4263
4266
4264
4267 def extsetup(ui):
4265 def extsetup(ui):
4268 # Ensure mq wrappers are called first, regardless of extension load order, by
4266 # Ensure mq wrappers are called first, regardless of extension load order, by
4269 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4267 # NOT wrapping in uisetup() and instead deferring to init stage two here.
4270 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4268 mqopt = [(b'', b'mq', None, _(b"operate on patch repository"))]
4271
4269
4272 extensions.wrapcommand(commands.table, b'import', mqimport)
4270 extensions.wrapcommand(commands.table, b'import', mqimport)
4273 cmdutil.summaryhooks.add(b'mq', summaryhook)
4271 cmdutil.summaryhooks.add(b'mq', summaryhook)
4274
4272
4275 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4273 entry = extensions.wrapcommand(commands.table, b'init', mqinit)
4276 entry[1].extend(mqopt)
4274 entry[1].extend(mqopt)
4277
4275
4278 def dotable(cmdtable):
4276 def dotable(cmdtable):
4279 for cmd, entry in cmdtable.items():
4277 for cmd, entry in cmdtable.items():
4280 cmd = cmdutil.parsealiases(cmd)[0]
4278 cmd = cmdutil.parsealiases(cmd)[0]
4281 func = entry[0]
4279 func = entry[0]
4282 if func.norepo:
4280 if func.norepo:
4283 continue
4281 continue
4284 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4282 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
4285 entry[1].extend(mqopt)
4283 entry[1].extend(mqopt)
4286
4284
4287 dotable(commands.table)
4285 dotable(commands.table)
4288
4286
4289 thismodule = sys.modules["hgext.mq"]
4287 thismodule = sys.modules["hgext.mq"]
4290 for extname, extmodule in extensions.extensions():
4288 for extname, extmodule in extensions.extensions():
4291 if extmodule != thismodule:
4289 if extmodule != thismodule:
4292 dotable(getattr(extmodule, 'cmdtable', {}))
4290 dotable(getattr(extmodule, 'cmdtable', {}))
4293
4291
4294
4292
4295 colortable = {
4293 colortable = {
4296 b'qguard.negative': b'red',
4294 b'qguard.negative': b'red',
4297 b'qguard.positive': b'yellow',
4295 b'qguard.positive': b'yellow',
4298 b'qguard.unguarded': b'green',
4296 b'qguard.unguarded': b'green',
4299 b'qseries.applied': b'blue bold underline',
4297 b'qseries.applied': b'blue bold underline',
4300 b'qseries.guarded': b'black bold',
4298 b'qseries.guarded': b'black bold',
4301 b'qseries.missing': b'red bold',
4299 b'qseries.missing': b'red bold',
4302 b'qseries.unapplied': b'black bold',
4300 b'qseries.unapplied': b'black bold',
4303 }
4301 }
@@ -1,2401 +1,2400 b''
1 # phabricator.py - simple Phabricator integration
1 # phabricator.py - simple Phabricator integration
2 #
2 #
3 # Copyright 2017 Facebook, Inc.
3 # Copyright 2017 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 """simple Phabricator integration (EXPERIMENTAL)
7 """simple Phabricator integration (EXPERIMENTAL)
8
8
9 This extension provides a ``phabsend`` command which sends a stack of
9 This extension provides a ``phabsend`` command which sends a stack of
10 changesets to Phabricator, a ``phabread`` command which prints a stack of
10 changesets to Phabricator, a ``phabread`` command which prints a stack of
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
12 to update statuses in batch.
12 to update statuses in batch.
13
13
14 A "phabstatus" view for :hg:`show` is also provided; it displays status
14 A "phabstatus" view for :hg:`show` is also provided; it displays status
15 information of Phabricator differentials associated with unfinished
15 information of Phabricator differentials associated with unfinished
16 changesets.
16 changesets.
17
17
18 By default, Phabricator requires a ``Test Plan``, which might prevent some
18 By default, Phabricator requires a ``Test Plan``, which might prevent some
19 changesets from being sent. The requirement can be disabled by changing the
19 changesets from being sent. The requirement can be disabled by changing the
20 ``differential.require-test-plan-field`` config on the server side.
20 ``differential.require-test-plan-field`` config on the server side.
21
21
22 Config::
22 Config::
23
23
24 [phabricator]
24 [phabricator]
25 # Phabricator URL
25 # Phabricator URL
26 url = https://phab.example.com/
26 url = https://phab.example.com/
27
27
28 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
28 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
29 # callsign is "FOO".
29 # callsign is "FOO".
30 callsign = FOO
30 callsign = FOO
31
31
32 # curl command to use. If not set (default), use builtin HTTP library to
32 # curl command to use. If not set (default), use builtin HTTP library to
33 # communicate. If set, use the specified curl command. This could be useful
33 # communicate. If set, use the specified curl command. This could be useful
34 # if you need to specify advanced options that are not easily supported by
34 # if you need to specify advanced options that are not easily supported by
35 # the internal library.
35 # the internal library.
36 curlcmd = curl --connect-timeout 2 --retry 3 --silent
36 curlcmd = curl --connect-timeout 2 --retry 3 --silent
37
37
38 # retry failed command N times (default 0). Useful when using the extension
38 # retry failed command N times (default 0). Useful when using the extension
39 # over a flaky connection.
39 # over a flaky connection.
40 #
40 #
41 # We wait `retry.interval` between each retry, in seconds.
41 # We wait `retry.interval` between each retry, in seconds.
42 # (default 1 second).
42 # (default 1 second).
43 retry = 3
43 retry = 3
44 retry.interval = 10
44 retry.interval = 10
45
45
46 # the retry option combines well with the http.timeout option.
46 # the retry option combines well with the http.timeout option.
47 #
47 #
48 # For example to give up on http request after 20 seconds:
48 # For example to give up on http request after 20 seconds:
49 [http]
49 [http]
50 timeout=20
50 timeout=20
51
51
52 [auth]
52 [auth]
53 example.schemes = https
53 example.schemes = https
54 example.prefix = phab.example.com
54 example.prefix = phab.example.com
55
55
56 # API token. Get it from https://$HOST/conduit/login/
56 # API token. Get it from https://$HOST/conduit/login/
57 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
57 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
58 """
58 """
59
59
60
60
61 import base64
61 import base64
62 import contextlib
62 import contextlib
63 import hashlib
63 import hashlib
64 import io
64 import io
65 import itertools
65 import itertools
66 import json
66 import json
67 import mimetypes
67 import mimetypes
68 import operator
68 import operator
69 import re
69 import re
70 import time
70 import time
71
71
72 from mercurial.node import bin, short
72 from mercurial.node import bin, short
73 from mercurial.i18n import _
73 from mercurial.i18n import _
74 from mercurial.pycompat import getattr
75 from mercurial.thirdparty import attr
74 from mercurial.thirdparty import attr
76 from mercurial import (
75 from mercurial import (
77 cmdutil,
76 cmdutil,
78 context,
77 context,
79 copies,
78 copies,
80 encoding,
79 encoding,
81 error,
80 error,
82 exthelper,
81 exthelper,
83 graphmod,
82 graphmod,
84 httpconnection as httpconnectionmod,
83 httpconnection as httpconnectionmod,
85 localrepo,
84 localrepo,
86 logcmdutil,
85 logcmdutil,
87 match,
86 match,
88 mdiff,
87 mdiff,
89 obsutil,
88 obsutil,
90 parser,
89 parser,
91 patch,
90 patch,
92 phases,
91 phases,
93 pycompat,
92 pycompat,
94 rewriteutil,
93 rewriteutil,
95 scmutil,
94 scmutil,
96 smartset,
95 smartset,
97 tags,
96 tags,
98 templatefilters,
97 templatefilters,
99 templateutil,
98 templateutil,
100 url as urlmod,
99 url as urlmod,
101 util,
100 util,
102 )
101 )
103 from mercurial.utils import (
102 from mercurial.utils import (
104 procutil,
103 procutil,
105 stringutil,
104 stringutil,
106 urlutil,
105 urlutil,
107 )
106 )
108 from . import show
107 from . import show
109
108
110
109
111 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
110 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
112 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
111 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
113 # be specifying the version(s) of Mercurial they are tested with, or
112 # be specifying the version(s) of Mercurial they are tested with, or
114 # leave the attribute unspecified.
113 # leave the attribute unspecified.
115 testedwith = b'ships-with-hg-core'
114 testedwith = b'ships-with-hg-core'
116
115
117 eh = exthelper.exthelper()
116 eh = exthelper.exthelper()
118
117
119 cmdtable = eh.cmdtable
118 cmdtable = eh.cmdtable
120 command = eh.command
119 command = eh.command
121 configtable = eh.configtable
120 configtable = eh.configtable
122 templatekeyword = eh.templatekeyword
121 templatekeyword = eh.templatekeyword
123 uisetup = eh.finaluisetup
122 uisetup = eh.finaluisetup
124
123
125 # developer config: phabricator.batchsize
124 # developer config: phabricator.batchsize
126 eh.configitem(
125 eh.configitem(
127 b'phabricator',
126 b'phabricator',
128 b'batchsize',
127 b'batchsize',
129 default=12,
128 default=12,
130 )
129 )
131 eh.configitem(
130 eh.configitem(
132 b'phabricator',
131 b'phabricator',
133 b'callsign',
132 b'callsign',
134 default=None,
133 default=None,
135 )
134 )
136 eh.configitem(
135 eh.configitem(
137 b'phabricator',
136 b'phabricator',
138 b'curlcmd',
137 b'curlcmd',
139 default=None,
138 default=None,
140 )
139 )
141 # developer config: phabricator.debug
140 # developer config: phabricator.debug
142 eh.configitem(
141 eh.configitem(
143 b'phabricator',
142 b'phabricator',
144 b'debug',
143 b'debug',
145 default=False,
144 default=False,
146 )
145 )
147 # developer config: phabricator.repophid
146 # developer config: phabricator.repophid
148 eh.configitem(
147 eh.configitem(
149 b'phabricator',
148 b'phabricator',
150 b'repophid',
149 b'repophid',
151 default=None,
150 default=None,
152 )
151 )
153 eh.configitem(
152 eh.configitem(
154 b'phabricator',
153 b'phabricator',
155 b'retry',
154 b'retry',
156 default=0,
155 default=0,
157 )
156 )
158 eh.configitem(
157 eh.configitem(
159 b'phabricator',
158 b'phabricator',
160 b'retry.interval',
159 b'retry.interval',
161 default=1,
160 default=1,
162 )
161 )
163 eh.configitem(
162 eh.configitem(
164 b'phabricator',
163 b'phabricator',
165 b'url',
164 b'url',
166 default=None,
165 default=None,
167 )
166 )
168 eh.configitem(
167 eh.configitem(
169 b'phabsend',
168 b'phabsend',
170 b'confirm',
169 b'confirm',
171 default=False,
170 default=False,
172 )
171 )
173 eh.configitem(
172 eh.configitem(
174 b'phabimport',
173 b'phabimport',
175 b'secret',
174 b'secret',
176 default=False,
175 default=False,
177 )
176 )
178 eh.configitem(
177 eh.configitem(
179 b'phabimport',
178 b'phabimport',
180 b'obsolete',
179 b'obsolete',
181 default=False,
180 default=False,
182 )
181 )
183
182
184 colortable = {
183 colortable = {
185 b'phabricator.action.created': b'green',
184 b'phabricator.action.created': b'green',
186 b'phabricator.action.skipped': b'magenta',
185 b'phabricator.action.skipped': b'magenta',
187 b'phabricator.action.updated': b'magenta',
186 b'phabricator.action.updated': b'magenta',
188 b'phabricator.drev': b'bold',
187 b'phabricator.drev': b'bold',
189 b'phabricator.status.abandoned': b'magenta dim',
188 b'phabricator.status.abandoned': b'magenta dim',
190 b'phabricator.status.accepted': b'green bold',
189 b'phabricator.status.accepted': b'green bold',
191 b'phabricator.status.closed': b'green',
190 b'phabricator.status.closed': b'green',
192 b'phabricator.status.needsreview': b'yellow',
191 b'phabricator.status.needsreview': b'yellow',
193 b'phabricator.status.needsrevision': b'red',
192 b'phabricator.status.needsrevision': b'red',
194 b'phabricator.status.changesplanned': b'red',
193 b'phabricator.status.changesplanned': b'red',
195 }
194 }
196
195
197 _VCR_FLAGS = [
196 _VCR_FLAGS = [
198 (
197 (
199 b'',
198 b'',
200 b'test-vcr',
199 b'test-vcr',
201 b'',
200 b'',
202 _(
201 _(
203 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
202 b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
204 b', otherwise will mock all http requests using the specified vcr file.'
203 b', otherwise will mock all http requests using the specified vcr file.'
205 b' (ADVANCED)'
204 b' (ADVANCED)'
206 ),
205 ),
207 ),
206 ),
208 ]
207 ]
209
208
210
209
211 @eh.wrapfunction(localrepo, "loadhgrc")
210 @eh.wrapfunction(localrepo, "loadhgrc")
212 def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
211 def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements, *args, **opts):
213 """Load ``.arcconfig`` content into a ui instance on repository open."""
212 """Load ``.arcconfig`` content into a ui instance on repository open."""
214 result = False
213 result = False
215 arcconfig = {}
214 arcconfig = {}
216
215
217 try:
216 try:
218 # json.loads only accepts bytes from 3.6+
217 # json.loads only accepts bytes from 3.6+
219 rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
218 rawparams = encoding.unifromlocal(wdirvfs.read(b".arcconfig"))
220 # json.loads only returns unicode strings
219 # json.loads only returns unicode strings
221 arcconfig = pycompat.rapply(
220 arcconfig = pycompat.rapply(
222 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
221 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
223 pycompat.json_loads(rawparams),
222 pycompat.json_loads(rawparams),
224 )
223 )
225
224
226 result = True
225 result = True
227 except ValueError:
226 except ValueError:
228 ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
227 ui.warn(_(b"invalid JSON in %s\n") % wdirvfs.join(b".arcconfig"))
229 except IOError:
228 except IOError:
230 pass
229 pass
231
230
232 cfg = util.sortdict()
231 cfg = util.sortdict()
233
232
234 if b"repository.callsign" in arcconfig:
233 if b"repository.callsign" in arcconfig:
235 cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
234 cfg[(b"phabricator", b"callsign")] = arcconfig[b"repository.callsign"]
236
235
237 if b"phabricator.uri" in arcconfig:
236 if b"phabricator.uri" in arcconfig:
238 cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
237 cfg[(b"phabricator", b"url")] = arcconfig[b"phabricator.uri"]
239
238
240 if cfg:
239 if cfg:
241 ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
240 ui.applyconfig(cfg, source=wdirvfs.join(b".arcconfig"))
242
241
243 return (
242 return (
244 orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
243 orig(ui, wdirvfs, hgvfs, requirements, *args, **opts) or result
245 ) # Load .hg/hgrc
244 ) # Load .hg/hgrc
246
245
247
246
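Based on the two keys _loadhgrc consumes above, a minimal .arcconfig sketch that this loader would translate into phabricator.callsign and phabricator.url (the values are illustrative):

    {
        "repository.callsign": "FOO",
        "phabricator.uri": "https://phab.example.com/"
    }
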
248 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
247 def vcrcommand(name, flags, spec, helpcategory=None, optionalrepo=False):
249 fullflags = flags + _VCR_FLAGS
248 fullflags = flags + _VCR_FLAGS
250
249
251 def hgmatcher(r1, r2):
250 def hgmatcher(r1, r2):
252 if r1.uri != r2.uri or r1.method != r2.method:
251 if r1.uri != r2.uri or r1.method != r2.method:
253 return False
252 return False
254 r1params = util.urlreq.parseqs(r1.body)
253 r1params = util.urlreq.parseqs(r1.body)
255 r2params = util.urlreq.parseqs(r2.body)
254 r2params = util.urlreq.parseqs(r2.body)
256 for key in r1params:
255 for key in r1params:
257 if key not in r2params:
256 if key not in r2params:
258 return False
257 return False
259 value = r1params[key][0]
258 value = r1params[key][0]
260 # we want to compare json payloads without worrying about ordering
259 # we want to compare json payloads without worrying about ordering
261 if value.startswith(b'{') and value.endswith(b'}'):
260 if value.startswith(b'{') and value.endswith(b'}'):
262 r1json = pycompat.json_loads(value)
261 r1json = pycompat.json_loads(value)
263 r2json = pycompat.json_loads(r2params[key][0])
262 r2json = pycompat.json_loads(r2params[key][0])
264 if r1json != r2json:
263 if r1json != r2json:
265 return False
264 return False
266 elif r2params[key][0] != value:
265 elif r2params[key][0] != value:
267 return False
266 return False
268 return True
267 return True
269
268
270 def sanitiserequest(request):
269 def sanitiserequest(request):
271 request.body = re.sub(
270 request.body = re.sub(
272 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
271 br'cli-[a-z0-9]+', br'cli-hahayouwish', request.body
273 )
272 )
274 return request
273 return request
275
274
276 def sanitiseresponse(response):
275 def sanitiseresponse(response):
277 if 'set-cookie' in response['headers']:
276 if 'set-cookie' in response['headers']:
278 del response['headers']['set-cookie']
277 del response['headers']['set-cookie']
279 return response
278 return response
280
279
281 def decorate(fn):
280 def decorate(fn):
282 def inner(*args, **kwargs):
281 def inner(*args, **kwargs):
283 vcr = kwargs.pop('test_vcr')
282 vcr = kwargs.pop('test_vcr')
284 if vcr:
283 if vcr:
285 cassette = pycompat.fsdecode(vcr)
284 cassette = pycompat.fsdecode(vcr)
286 import hgdemandimport
285 import hgdemandimport
287
286
288 with hgdemandimport.deactivated():
287 with hgdemandimport.deactivated():
289 # pytype: disable=import-error
288 # pytype: disable=import-error
290 import vcr as vcrmod
289 import vcr as vcrmod
291 import vcr.stubs as stubs
290 import vcr.stubs as stubs
292
291
293 # pytype: enable=import-error
292 # pytype: enable=import-error
294
293
295 vcr = vcrmod.VCR(
294 vcr = vcrmod.VCR(
296 serializer='json',
295 serializer='json',
297 before_record_request=sanitiserequest,
296 before_record_request=sanitiserequest,
298 before_record_response=sanitiseresponse,
297 before_record_response=sanitiseresponse,
299 custom_patches=[
298 custom_patches=[
300 (
299 (
301 urlmod,
300 urlmod,
302 'httpconnection',
301 'httpconnection',
303 stubs.VCRHTTPConnection,
302 stubs.VCRHTTPConnection,
304 ),
303 ),
305 (
304 (
306 urlmod,
305 urlmod,
307 'httpsconnection',
306 'httpsconnection',
308 stubs.VCRHTTPSConnection,
307 stubs.VCRHTTPSConnection,
309 ),
308 ),
310 ],
309 ],
311 )
310 )
312 vcr.register_matcher('hgmatcher', hgmatcher)
311 vcr.register_matcher('hgmatcher', hgmatcher)
313 with vcr.use_cassette(cassette, match_on=['hgmatcher']):
312 with vcr.use_cassette(cassette, match_on=['hgmatcher']):
314 return fn(*args, **kwargs)
313 return fn(*args, **kwargs)
315 return fn(*args, **kwargs)
314 return fn(*args, **kwargs)
316
315
317 cmd = util.checksignature(inner, depth=2)
316 cmd = util.checksignature(inner, depth=2)
318 cmd.__name__ = fn.__name__
317 cmd.__name__ = fn.__name__
319 cmd.__doc__ = fn.__doc__
318 cmd.__doc__ = fn.__doc__
320
319
321 return command(
320 return command(
322 name,
321 name,
323 fullflags,
322 fullflags,
324 spec,
323 spec,
325 helpcategory=helpcategory,
324 helpcategory=helpcategory,
326 optionalrepo=optionalrepo,
325 optionalrepo=optionalrepo,
327 )(cmd)
326 )(cmd)
328
327
329 return decorate
328 return decorate
330
329
331
330
332 def _debug(ui, *msg, **opts):
331 def _debug(ui, *msg, **opts):
333 """write debug output for Phabricator if ``phabricator.debug`` is set
332 """write debug output for Phabricator if ``phabricator.debug`` is set
334
333
335 Specifically, this avoids dumping Conduit and HTTP auth chatter that is
334 Specifically, this avoids dumping Conduit and HTTP auth chatter that is
336 printed with the --debug argument.
335 printed with the --debug argument.
337 """
336 """
338 if ui.configbool(b"phabricator", b"debug"):
337 if ui.configbool(b"phabricator", b"debug"):
339 flag = ui.debugflag
338 flag = ui.debugflag
340 try:
339 try:
341 ui.debugflag = True
340 ui.debugflag = True
342 ui.write(*msg, **opts)
341 ui.write(*msg, **opts)
343 finally:
342 finally:
344 ui.debugflag = flag
343 ui.debugflag = flag
345
344
346
345
347 def urlencodenested(params):
346 def urlencodenested(params):
348 """like urlencode, but works with nested parameters.
347 """like urlencode, but works with nested parameters.
349
348
350 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
349 For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
351 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
350 flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
352 urlencode. Note: the encoding is consistent with PHP's http_build_query.
351 urlencode. Note: the encoding is consistent with PHP's http_build_query.
353 """
352 """
354 flatparams = util.sortdict()
353 flatparams = util.sortdict()
355
354
356 def process(prefix: bytes, obj):
355 def process(prefix: bytes, obj):
357 if isinstance(obj, bool):
356 if isinstance(obj, bool):
358 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
357 obj = {True: b'true', False: b'false'}[obj] # Python -> PHP form
359 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
358 lister = lambda l: [(b'%d' % k, v) for k, v in enumerate(l)]
360 # .items() will only be called for a dict type
359 # .items() will only be called for a dict type
361 # pytype: disable=attribute-error
360 # pytype: disable=attribute-error
362 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
361 items = {list: lister, dict: lambda x: x.items()}.get(type(obj))
363 # pytype: enable=attribute-error
362 # pytype: enable=attribute-error
364 if items is None:
363 if items is None:
365 flatparams[prefix] = obj
364 flatparams[prefix] = obj
366 else:
365 else:
367 for k, v in items(obj):
366 for k, v in items(obj):
368 if prefix:
367 if prefix:
369 process(b'%s[%s]' % (prefix, k), v)
368 process(b'%s[%s]' % (prefix, k), v)
370 else:
369 else:
371 process(k, v)
370 process(k, v)
372
371
373 process(b'', params)
372 process(b'', params)
374 return urlutil.urlreq.urlencode(flatparams)
373 return urlutil.urlreq.urlencode(flatparams)
375
374
376
375
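A quick sketch of the flattening urlencodenested performs, following its docstring example (Python session; exact quoting depends on the urlencode implementation):

    >>> urlencodenested({b'a': [b'b', b'c'], b'd': {b'e': b'f'}})
    'a%5B0%5D=b&a%5B1%5D=c&d%5Be%5D=f'   # i.e. a[0]=b&a[1]=c&d[e]=f
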
377 def readurltoken(ui):
376 def readurltoken(ui):
378 """return conduit url, token and make sure they exist
377 """return conduit url, token and make sure they exist
379
378
380 Currently read from [auth] config section. In the future, it might
379 Currently read from [auth] config section. In the future, it might
381 make sense to read from .arcconfig and .arcrc as well.
380 make sense to read from .arcconfig and .arcrc as well.
382 """
381 """
383 url = ui.config(b'phabricator', b'url')
382 url = ui.config(b'phabricator', b'url')
384 if not url:
383 if not url:
385 raise error.Abort(
384 raise error.Abort(
386 _(b'config %s.%s is required') % (b'phabricator', b'url')
385 _(b'config %s.%s is required') % (b'phabricator', b'url')
387 )
386 )
388
387
389 res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
388 res = httpconnectionmod.readauthforuri(ui, url, urlutil.url(url).user)
390 token = None
389 token = None
391
390
392 if res:
391 if res:
393 group, auth = res
392 group, auth = res
394
393
395 ui.debug(b"using auth.%s.* for authentication\n" % group)
394 ui.debug(b"using auth.%s.* for authentication\n" % group)
396
395
397 token = auth.get(b'phabtoken')
396 token = auth.get(b'phabtoken')
398
397
399 if not token:
398 if not token:
400 raise error.Abort(
399 raise error.Abort(
401 _(b'Can\'t find conduit token associated to %s') % (url,)
400 _(b'Can\'t find conduit token associated to %s') % (url,)
402 )
401 )
403
402
404 return url, token
403 return url, token
405
404
406
405
407 def callconduit(ui, name, params):
406 def callconduit(ui, name, params):
408 """call Conduit API, params is a dict. return json.loads result, or None"""
407 """call Conduit API, params is a dict. return json.loads result, or None"""
409 host, token = readurltoken(ui)
408 host, token = readurltoken(ui)
410 url, authinfo = urlutil.url(b'/'.join([host, b'api', name])).authinfo()
409 url, authinfo = urlutil.url(b'/'.join([host, b'api', name])).authinfo()
411 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
410 ui.debug(b'Conduit Call: %s %s\n' % (url, pycompat.byterepr(params)))
412 params = params.copy()
411 params = params.copy()
413 params[b'__conduit__'] = {
412 params[b'__conduit__'] = {
414 b'token': token,
413 b'token': token,
415 }
414 }
416 rawdata = {
415 rawdata = {
417 b'params': templatefilters.json(params),
416 b'params': templatefilters.json(params),
418 b'output': b'json',
417 b'output': b'json',
419 b'__conduit__': 1,
418 b'__conduit__': 1,
420 }
419 }
421 data = urlencodenested(rawdata)
420 data = urlencodenested(rawdata)
422 curlcmd = ui.config(b'phabricator', b'curlcmd')
421 curlcmd = ui.config(b'phabricator', b'curlcmd')
423 if curlcmd:
422 if curlcmd:
424 sin, sout = procutil.popen2(
423 sin, sout = procutil.popen2(
425 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
424 b'%s -d @- %s' % (curlcmd, procutil.shellquote(url))
426 )
425 )
427 sin.write(data)
426 sin.write(data)
428 sin.close()
427 sin.close()
429 body = sout.read()
428 body = sout.read()
430 else:
429 else:
431 urlopener = urlmod.opener(ui, authinfo)
430 urlopener = urlmod.opener(ui, authinfo)
432 request = util.urlreq.request(pycompat.strurl(url), data=data)
431 request = util.urlreq.request(pycompat.strurl(url), data=data)
433 max_try = ui.configint(b'phabricator', b'retry') + 1
432 max_try = ui.configint(b'phabricator', b'retry') + 1
434 timeout = ui.configwith(float, b'http', b'timeout')
433 timeout = ui.configwith(float, b'http', b'timeout')
435 for try_count in range(max_try):
434 for try_count in range(max_try):
436 try:
435 try:
437 with contextlib.closing(
436 with contextlib.closing(
438 urlopener.open(request, timeout=timeout)
437 urlopener.open(request, timeout=timeout)
439 ) as rsp:
438 ) as rsp:
440 body = rsp.read()
439 body = rsp.read()
441 break
440 break
442 except util.urlerr.urlerror as err:
441 except util.urlerr.urlerror as err:
443 if try_count == max_try - 1:
442 if try_count == max_try - 1:
444 raise
443 raise
445 ui.debug(
444 ui.debug(
446 b'Conduit Request failed (try %d/%d): %r\n'
445 b'Conduit Request failed (try %d/%d): %r\n'
447 % (try_count + 1, max_try, err)
446 % (try_count + 1, max_try, err)
448 )
447 )
449 # a failing request might come from an overloaded server
448 # a failing request might come from an overloaded server
450 retry_interval = ui.configint(b'phabricator', b'retry.interval')
449 retry_interval = ui.configint(b'phabricator', b'retry.interval')
451 time.sleep(retry_interval)
450 time.sleep(retry_interval)
452 ui.debug(b'Conduit Response: %s\n' % body)
451 ui.debug(b'Conduit Response: %s\n' % body)
453 parsed = pycompat.rapply(
452 parsed = pycompat.rapply(
454 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
453 lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
455 # json.loads only accepts bytes from py3.6+
454 # json.loads only accepts bytes from py3.6+
456 pycompat.json_loads(encoding.unifromlocal(body)),
455 pycompat.json_loads(encoding.unifromlocal(body)),
457 )
456 )
458 if parsed.get(b'error_code'):
457 if parsed.get(b'error_code'):
459 msg = _(b'Conduit Error (%s): %s') % (
458 msg = _(b'Conduit Error (%s): %s') % (
460 parsed[b'error_code'],
459 parsed[b'error_code'],
461 parsed[b'error_info'],
460 parsed[b'error_info'],
462 )
461 )
463 raise error.Abort(msg)
462 raise error.Abort(msg)
464 return parsed[b'result']
463 return parsed[b'result']
465
464
466
465
@vcrcommand(b'debugcallconduit', [], _(b'METHOD'), optionalrepo=True)
def debugcallconduit(ui, repo, name):
    """call Conduit API

    Call parameters are read from stdin as a JSON blob. Result will be written
    to stdout as a JSON blob.
    """
    # json.loads only accepts bytes from 3.6+
    rawparams = encoding.unifromlocal(ui.fin.read())
    # json.loads only returns unicode strings
    params = pycompat.rapply(
        lambda x: encoding.unitolocal(x) if isinstance(x, str) else x,
        pycompat.json_loads(rawparams),
    )
    # json.dumps only accepts unicode strings
    result = pycompat.rapply(
        lambda x: encoding.unifromlocal(x) if isinstance(x, bytes) else x,
        callconduit(ui, name, params),
    )
    s = json.dumps(result, sort_keys=True, indent=2, separators=(u',', u': '))
    ui.write(b'%s\n' % encoding.unitolocal(s))
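# Example invocation (a sketch; `conduit.ping` is a standard Conduit method,
# and the reply shown is hypothetical):
#
#   $ echo '{}' | hg debugcallconduit conduit.ping
#   "phab.example.com"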


def getrepophid(repo):
    """given the configured callsign, return the repository PHID or None"""
    # developer config: phabricator.repophid
    repophid = repo.ui.config(b'phabricator', b'repophid')
    if repophid:
        return repophid
    callsign = repo.ui.config(b'phabricator', b'callsign')
    if not callsign:
        return None
    query = callconduit(
        repo.ui,
        b'diffusion.repository.search',
        {b'constraints': {b'callsigns': [callsign]}},
    )
    if len(query[b'data']) == 0:
        return None
    repophid = query[b'data'][0][b'phid']
    repo.ui.setconfig(b'phabricator', b'repophid', repophid)
    return repophid
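# For illustration (callsign and PHID are hypothetical): with
# `[phabricator] callsign = HG` configured, the lookup above is roughly:
#
#   $ echo '{"constraints": {"callsigns": ["HG"]}}' \
#   >     | hg debugcallconduit diffusion.repository.search
#   {"data": [{"phid": "PHID-REPO-...", ...}], ...}
#
# The resulting PHID is cached back into the runtime config as
# phabricator.repophid, so later calls in the same process skip the search.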


_differentialrevisiontagre = re.compile(br'\AD([1-9][0-9]*)\Z')
_differentialrevisiondescre = re.compile(
    br'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M
)
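# For illustration, the two association markers these patterns recognize:
#
#   _differentialrevisiontagre.match(b'D1234')        # local tag form
#   _differentialrevisiondescre.search(
#       b'Differential Revision: https://phab.example.com/D1234'
#   )  # commit message form; group 'url' is the link, group 'id' is b'1234'
#
# The hostname here is hypothetical; only the trailing D<number> feeds 'id'.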


def getoldnodedrevmap(repo, nodelist):
    """find previous nodes that have been sent to Phabricator

    return {node: (oldnode, Differential diff, Differential Revision ID)}
    for node in nodelist with known previous sent versions, or associated
    Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
    be ``None``.

    Examines commit messages like "Differential Revision:" to get the
    association information.

    If no such commit message line is found, examines all precursors and their
    tags. Tags with a format like "D1234" are considered a match, and the node
    with that tag and the number after "D" (e.g. 1234) will be returned.

    The ``old node``, if not None, is guaranteed to be the last diff of the
    corresponding Differential Revision, and to exist in the repo.
    """
    unfi = repo.unfiltered()
    has_node = unfi.changelog.index.has_node

    result = {}  # {node: (oldnode?, lastdiff?, drev)}
    # ordered for test stability when printing new -> old mapping below
    toconfirm = util.sortdict()  # {node: (force, {precnode}, drev)}
    for node in nodelist:
        ctx = unfi[node]
        # For tags like "D123", put them into "toconfirm" to verify later
        precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
        for n in precnodes:
            if has_node(n):
                for tag in unfi.nodetags(n):
                    m = _differentialrevisiontagre.match(tag)
                    if m:
                        toconfirm[node] = (0, set(precnodes), int(m.group(1)))
                        break
                else:
                    continue  # move to next predecessor
                break  # found a tag, stop
        else:
            # Check commit message
            m = _differentialrevisiondescre.search(ctx.description())
            if m:
                toconfirm[node] = (1, set(precnodes), int(m.group('id')))

    # Double check if tags are genuine by collecting all old nodes from
    # Phabricator, and expect the precursors to overlap with them.
    if toconfirm:
        drevs = [drev for force, precs, drev in toconfirm.values()]
        alldiffs = callconduit(
            unfi.ui, b'differential.querydiffs', {b'revisionIDs': drevs}
        )

        def getnodes(d, precset):
            # Ignore other nodes that were combined into the Differential
            # that aren't predecessors of the current local node.
            return [n for n in getlocalcommits(d) if n in precset]

        for newnode, (force, precset, drev) in toconfirm.items():
            diffs = [
                d for d in alldiffs.values() if int(d[b'revisionID']) == drev
            ]

            # local predecessors known by Phabricator
            phprecset = {n for d in diffs for n in getnodes(d, precset)}

            # Ignore if precursors (Phabricator and local repo) do not overlap,
            # and force is not set (when commit message says nothing)
            if not force and not phprecset:
                tagname = b'D%d' % drev
                tags.tag(
                    repo,
                    tagname,
                    repo.nullid,
                    message=None,
                    user=None,
                    date=None,
                    local=True,
                )
                unfi.ui.warn(
                    _(
                        b'D%d: local tag removed - does not match '
                        b'Differential history\n'
                    )
                    % drev
                )
                continue

            # Find the last node using Phabricator metadata, and make sure it
            # exists in the repo
            oldnode = lastdiff = None
            if diffs:
                lastdiff = max(diffs, key=lambda d: int(d[b'id']))
                oldnodes = getnodes(lastdiff, precset)

                _debug(
                    unfi.ui,
                    b"%s mapped to old nodes %s\n"
                    % (
                        short(newnode),
                        stringutil.pprint([short(n) for n in sorted(oldnodes)]),
                    ),
                )

                # If this commit was the result of `hg fold` after submission,
                # and now resubmitted with --fold, the easiest thing to do is
                # to leave the node clear. This only results in creating a new
                # diff for the _same_ Differential Revision if this commit is
                # the first or last in the selected range. If we picked a node
                # from the list instead, it would have to be the lowest if at
                # the beginning of the --fold range, or the highest at the end.
                # Otherwise, one or more of the nodes wouldn't be considered in
                # the diff, and the Differential wouldn't be properly updated.
                # If this commit is the result of `hg split` in the same
                # scenario, there is a single oldnode here (and multiple
                # newnodes mapped to it). That makes it the same as the normal
                # case, as the edges of the newnode range cleanly map to one
                # oldnode each.
                if len(oldnodes) == 1:
                    oldnode = oldnodes[0]
                if oldnode and not has_node(oldnode):
                    oldnode = None

            result[newnode] = (oldnode, lastdiff, drev)

    return result
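# Shape of the mapping returned above, for illustration (nodes and Revision
# IDs are hypothetical):
#
#   {newnode1: (oldnode1, lastdiff1, 123),  # resubmission of a known diff
#    newnode2: (None, None, 456)}           # only a Revision ID association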


def getdrevmap(repo, revs):
    """Return a dict mapping each rev in `revs` to its Differential Revision
    ID or None.
    """
    result = {}
    for rev in revs:
        result[rev] = None
        ctx = repo[rev]
        # Check commit message
        m = _differentialrevisiondescre.search(ctx.description())
        if m:
            result[rev] = int(m.group('id'))
            continue
        # Check tags
        for tag in repo.nodetags(ctx.node()):
            m = _differentialrevisiontagre.match(tag)
            if m:
                result[rev] = int(m.group(1))
                break

    return result


def getdiff(basectx, ctx, diffopts):
    """plain-text diff without header (user, commit message, etc)"""
    output = util.stringio()
    for chunk, _label in patch.diffui(
        ctx.repo(), basectx.p1().node(), ctx.node(), None, opts=diffopts
    ):
        output.write(chunk)
    return output.getvalue()


class DiffChangeType:
    ADD = 1
    CHANGE = 2
    DELETE = 3
    MOVE_AWAY = 4
    COPY_AWAY = 5
    MOVE_HERE = 6
    COPY_HERE = 7
    MULTICOPY = 8


class DiffFileType:
    TEXT = 1
    IMAGE = 2
    BINARY = 3


@attr.s
class phabhunk(dict):
    """Represents a Differential hunk, which is owned by a Differential change"""

    oldOffset = attr.ib(default=0)  # camelcase-required
    oldLength = attr.ib(default=0)  # camelcase-required
    newOffset = attr.ib(default=0)  # camelcase-required
    newLength = attr.ib(default=0)  # camelcase-required
    corpus = attr.ib(default='')
    # These get added to the phabchange's equivalents
    addLines = attr.ib(default=0)  # camelcase-required
    delLines = attr.ib(default=0)  # camelcase-required


@attr.s
class phabchange:
    """Represents a Differential change, owns Differential hunks and is owned
    by a Differential diff. Each one represents one file in a diff.
    """

    currentPath = attr.ib(default=None)  # camelcase-required
    oldPath = attr.ib(default=None)  # camelcase-required
    awayPaths = attr.ib(default=attr.Factory(list))  # camelcase-required
    metadata = attr.ib(default=attr.Factory(dict))
    oldProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
    newProperties = attr.ib(default=attr.Factory(dict))  # camelcase-required
    type = attr.ib(default=DiffChangeType.CHANGE)
    fileType = attr.ib(default=DiffFileType.TEXT)  # camelcase-required
    commitHash = attr.ib(default=None)  # camelcase-required
    addLines = attr.ib(default=0)  # camelcase-required
    delLines = attr.ib(default=0)  # camelcase-required
    hunks = attr.ib(default=attr.Factory(list))

    def copynewmetadatatoold(self):
        for key in list(self.metadata.keys()):
            newkey = key.replace(b'new:', b'old:')
            self.metadata[newkey] = self.metadata[key]

    def addoldmode(self, value):
        self.oldProperties[b'unix:filemode'] = value

    def addnewmode(self, value):
        self.newProperties[b'unix:filemode'] = value

    def addhunk(self, hunk):
        if not isinstance(hunk, phabhunk):
            raise error.Abort(b'phabchange.addhunk only takes phabhunks')
        self.hunks.append(pycompat.byteskwargs(attr.asdict(hunk)))
        # It's useful to include these stats since the Phab web UI shows them,
        # and uses them to estimate how large a change a Revision is. Also used
        # in email subjects for the [+++--] bit.
        self.addLines += hunk.addLines
        self.delLines += hunk.delLines


@attr.s
class phabdiff:
    """Represents a Differential diff, owns Differential changes. Corresponds
    to a commit.
    """

    # There doesn't seem to be any reason to send this (output of uname -n)
    sourceMachine = attr.ib(default=b'')  # camelcase-required
    sourcePath = attr.ib(default=b'/')  # camelcase-required
    sourceControlBaseRevision = attr.ib(default=b'0' * 40)  # camelcase-required
    sourceControlPath = attr.ib(default=b'/')  # camelcase-required
    sourceControlSystem = attr.ib(default=b'hg')  # camelcase-required
    branch = attr.ib(default=b'default')
    bookmark = attr.ib(default=None)
    creationMethod = attr.ib(default=b'phabsend')  # camelcase-required
    lintStatus = attr.ib(default=b'none')  # camelcase-required
    unitStatus = attr.ib(default=b'none')  # camelcase-required
    changes = attr.ib(default=attr.Factory(dict))
    repositoryPHID = attr.ib(default=None)  # camelcase-required

    def addchange(self, change):
        if not isinstance(change, phabchange):
            raise error.Abort(b'phabdiff.addchange only takes phabchanges')
        self.changes[change.currentPath] = pycompat.byteskwargs(
            attr.asdict(change)
        )


def maketext(pchange, basectx, ctx, fname):
    """populate the phabchange for a text file"""
    repo = ctx.repo()
    fmatcher = match.exact([fname])
    diffopts = mdiff.diffopts(git=True, context=32767)
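    # Note (an inference, not a comment from the original source): the huge
    # context value effectively asks mdiff for whole-file hunks, which is the
    # full-context form Phabricator appears to expect when rendering changes.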
    _pfctx, _fctx, header, fhunks = next(
        patch.diffhunks(repo, basectx.p1(), ctx, fmatcher, opts=diffopts)
    )

    for fhunk in fhunks:
        (oldOffset, oldLength, newOffset, newLength), lines = fhunk
        corpus = b''.join(lines[1:])
        shunk = list(header)
        shunk.extend(lines)
        _mf, _mt, addLines, delLines, _hb = patch.diffstatsum(
            patch.diffstatdata(util.iterlines(shunk))
        )
        pchange.addhunk(
            phabhunk(
                oldOffset,
                oldLength,
                newOffset,
                newLength,
                corpus,
                addLines,
                delLines,
            )
        )


def uploadchunks(fctx, fphid):
    """upload large binary files as separate chunks.
    Phabricator requests chunking for files over 8MiB, and splits them into
    4MiB chunks
    """
    ui = fctx.repo().ui
    chunks = callconduit(ui, b'file.querychunks', {b'filePHID': fphid})
    with ui.makeprogress(
        _(b'uploading file chunks'), unit=_(b'chunks'), total=len(chunks)
    ) as progress:
        for chunk in chunks:
            progress.increment()
            if chunk[b'complete']:
                continue
            bstart = int(chunk[b'byteStart'])
            bend = int(chunk[b'byteEnd'])
            callconduit(
                ui,
                b'file.uploadchunk',
                {
                    b'filePHID': fphid,
                    b'byteStart': bstart,
                    b'data': base64.b64encode(fctx.data()[bstart:bend]),
                    b'dataEncoding': b'base64',
                },
            )
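# Worked example using the sizes from the docstring above (the exact chunk
# boundaries are illustrative): a 10MiB file would be allocated as three
# chunks,
#
#   byteStart        0, byteEnd  4194304   # 4MiB
#   byteStart  4194304, byteEnd  8388608   # 4MiB
#   byteStart  8388608, byteEnd 10485760   # remaining 2MiB
#
# Each incomplete chunk is sliced out of fctx.data(), base64-encoded, and sent
# via file.uploadchunk; chunks already marked complete are skipped, which
# makes an interrupted upload resumable.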


def uploadfile(fctx):
    """upload binary files to Phabricator"""
    repo = fctx.repo()
    ui = repo.ui
    fname = fctx.path()
    size = fctx.size()
    fhash = pycompat.bytestr(hashlib.sha256(fctx.data()).hexdigest())

    # an allocate call is required first to see if an upload is even required
    # (Phab might already have it) and to determine if chunking is needed
    allocateparams = {
        b'name': fname,
        b'contentLength': size,
        b'contentHash': fhash,
    }
    filealloc = callconduit(ui, b'file.allocate', allocateparams)
    fphid = filealloc[b'filePHID']

    if filealloc[b'upload']:
        ui.write(_(b'uploading %s\n') % bytes(fctx))
        if not fphid:
            uploadparams = {
                b'name': fname,
                b'data_base64': base64.b64encode(fctx.data()),
            }
            fphid = callconduit(ui, b'file.upload', uploadparams)
        else:
            uploadchunks(fctx, fphid)
    else:
        ui.debug(b'server already has %s\n' % bytes(fctx))

    if not fphid:
        raise error.Abort(b'Upload of %s failed.' % bytes(fctx))

    return fphid


def addoldbinary(pchange, oldfctx, fctx):
    """add the metadata for the previous version of a binary file to the
    phabchange for the new version

    ``oldfctx`` is the previous version of the file; ``fctx`` is the new
    version of the file, or None if the file is being removed.
    """
    if not fctx or fctx.cmp(oldfctx):
        # Files differ, add the old one
        pchange.metadata[b'old:file:size'] = oldfctx.size()
        mimeguess, _enc = mimetypes.guess_type(
            encoding.unifromlocal(oldfctx.path())
        )
        if mimeguess:
            pchange.metadata[b'old:file:mime-type'] = pycompat.bytestr(
                mimeguess
            )
        fphid = uploadfile(oldfctx)
        pchange.metadata[b'old:binary-phid'] = fphid
    else:
        # If it's left as IMAGE/BINARY, the web UI might try to display it
        pchange.fileType = DiffFileType.TEXT
        pchange.copynewmetadatatoold()


def makebinary(pchange, fctx):
    """populate the phabchange for a binary file"""
    pchange.fileType = DiffFileType.BINARY
    fphid = uploadfile(fctx)
    pchange.metadata[b'new:binary-phid'] = fphid
    pchange.metadata[b'new:file:size'] = fctx.size()
    mimeguess, _enc = mimetypes.guess_type(encoding.unifromlocal(fctx.path()))
    if mimeguess:
        mimeguess = pycompat.bytestr(mimeguess)
        pchange.metadata[b'new:file:mime-type'] = mimeguess
        if mimeguess.startswith(b'image/'):
            pchange.fileType = DiffFileType.IMAGE


# Copied from mercurial/patch.py
gitmode = {b'l': b'120000', b'x': b'100755', b'': b'100644'}
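# For example, gitmode[b'x'] is b'100755' (executable), gitmode[b'l'] is
# b'120000' (symlink), and the empty flag string maps to the regular-file
# mode b'100644'.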


def notutf8(fctx):
    """detect non-UTF-8 text files since Phabricator requires them to be marked
    as binary
    """
    try:
        fctx.data().decode('utf-8')
        return False
    except UnicodeDecodeError:
        fctx.repo().ui.write(
            _(b'file %s detected as non-UTF-8, marked as binary\n')
            % fctx.path()
        )
        return True


def addremoved(pdiff, basectx, ctx, removed):
    """add removed files to the phabdiff. Shouldn't include moves"""
    for fname in removed:
        pchange = phabchange(
            currentPath=fname, oldPath=fname, type=DiffChangeType.DELETE
        )
        oldfctx = basectx.p1()[fname]
        pchange.addoldmode(gitmode[oldfctx.flags()])
        if not (oldfctx.isbinary() or notutf8(oldfctx)):
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)


def addmodified(pdiff, basectx, ctx, modified):
    """add modified files to the phabdiff"""
    for fname in modified:
        fctx = ctx[fname]
        oldfctx = basectx.p1()[fname]
        pchange = phabchange(currentPath=fname, oldPath=fname)
        filemode = gitmode[fctx.flags()]
        originalmode = gitmode[oldfctx.flags()]
        if filemode != originalmode:
            pchange.addoldmode(originalmode)
            pchange.addnewmode(filemode)

        if (
            fctx.isbinary()
            or notutf8(fctx)
            or oldfctx.isbinary()
            or notutf8(oldfctx)
        ):
            makebinary(pchange, fctx)
            addoldbinary(pchange, oldfctx, fctx)
        else:
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)


def addadded(pdiff, basectx, ctx, added, removed):
    """add file adds to the phabdiff, both new files and copies/moves"""
    # Keep track of files that have been recorded as moved/copied, so if there
    # are additional copies we can mark them (moves get removed from removed)
    copiedchanges = {}
    movedchanges = {}

    copy = {}
    if basectx != ctx:
        copy = copies.pathcopies(basectx.p1(), ctx)

    for fname in added:
        fctx = ctx[fname]
        oldfctx = None
        pchange = phabchange(currentPath=fname)

        filemode = gitmode[fctx.flags()]

        if copy:
            originalfname = copy.get(fname, fname)
        else:
            originalfname = fname
            if fctx.renamed():
                originalfname = fctx.renamed()[0]

        renamed = fname != originalfname

        if renamed:
            oldfctx = basectx.p1()[originalfname]
            originalmode = gitmode[oldfctx.flags()]
            pchange.oldPath = originalfname

            if originalfname in removed:
                origpchange = phabchange(
                    currentPath=originalfname,
                    oldPath=originalfname,
                    type=DiffChangeType.MOVE_AWAY,
                    awayPaths=[fname],
                )
                movedchanges[originalfname] = origpchange
                removed.remove(originalfname)
                pchange.type = DiffChangeType.MOVE_HERE
            elif originalfname in movedchanges:
                movedchanges[originalfname].type = DiffChangeType.MULTICOPY
                movedchanges[originalfname].awayPaths.append(fname)
                pchange.type = DiffChangeType.COPY_HERE
            else:  # pure copy
                if originalfname not in copiedchanges:
                    origpchange = phabchange(
                        currentPath=originalfname, type=DiffChangeType.COPY_AWAY
                    )
                    copiedchanges[originalfname] = origpchange
                else:
                    origpchange = copiedchanges[originalfname]
                origpchange.awayPaths.append(fname)
                pchange.type = DiffChangeType.COPY_HERE

            if filemode != originalmode:
                pchange.addoldmode(originalmode)
                pchange.addnewmode(filemode)
        else:  # Brand-new file
            pchange.addnewmode(gitmode[fctx.flags()])
            pchange.type = DiffChangeType.ADD

        if (
            fctx.isbinary()
            or notutf8(fctx)
            or (oldfctx and (oldfctx.isbinary() or notutf8(oldfctx)))
        ):
            makebinary(pchange, fctx)
            if renamed:
                addoldbinary(pchange, oldfctx, fctx)
        else:
            maketext(pchange, basectx, ctx, fname)

        pdiff.addchange(pchange)

    for _path, copiedchange in copiedchanges.items():
        pdiff.addchange(copiedchange)
    for _path, movedchange in movedchanges.items():
        pdiff.addchange(movedchange)
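# Illustrative scenario (hypothetical filenames): if `a` was moved to `b` and
# also copied to `c` in the same commit, `a` becomes a MULTICOPY change with
# awayPaths [b'b', b'c'], `b` is MOVE_HERE, `c` is COPY_HERE, and `a` is
# dropped from `removed` so addremoved() does not also emit a DELETE for it.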


def creatediff(basectx, ctx):
    """create a Differential Diff"""
    repo = ctx.repo()
    repophid = getrepophid(repo)
    # Create a "Differential Diff" via "differential.creatediff" API
    pdiff = phabdiff(
        sourceControlBaseRevision=b'%s' % basectx.p1().hex(),
        branch=b'%s' % ctx.branch(),
    )
    modified, added, removed, _d, _u, _i, _c = basectx.p1().status(ctx)
    # addadded will remove moved files from removed, so addremoved won't get
    # them
    addadded(pdiff, basectx, ctx, added, removed)
    addmodified(pdiff, basectx, ctx, modified)
    addremoved(pdiff, basectx, ctx, removed)
    if repophid:
        pdiff.repositoryPHID = repophid
    diff = callconduit(
        repo.ui,
        b'differential.creatediff',
        pycompat.byteskwargs(attr.asdict(pdiff)),
    )
    if not diff:
        if basectx != ctx:
            msg = _(b'cannot create diff for %s::%s') % (basectx, ctx)
        else:
            msg = _(b'cannot create diff for %s') % ctx
        raise error.Abort(msg)
    return diff


def writediffproperties(ctxs, diff):
    """write metadata to diff so patches can be applied losslessly

    ``ctxs`` is the list of commits that created the diff, in ascending order.
    The list is generally a single commit, but may be several when using
    ``phabsend --fold``.
    """
    # creatediff returns with a diffid but query returns with an id
    diffid = diff.get(b'diffid', diff.get(b'id'))
    basectx = ctxs[0]
    tipctx = ctxs[-1]

    params = {
        b'diff_id': diffid,
        b'name': b'hg:meta',
        b'data': templatefilters.json(
            {
                b'user': tipctx.user(),
                b'date': b'%d %d' % tipctx.date(),
                b'branch': tipctx.branch(),
                b'node': tipctx.hex(),
                b'parent': basectx.p1().hex(),
            }
        ),
    }
    callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)

    commits = {}
    for ctx in ctxs:
        commits[ctx.hex()] = {
            b'author': stringutil.person(ctx.user()),
            b'authorEmail': stringutil.email(ctx.user()),
            b'time': int(ctx.date()[0]),
            b'commit': ctx.hex(),
            b'parents': [ctx.p1().hex()],
            b'branch': ctx.branch(),
        }
    params = {
        b'diff_id': diffid,
        b'name': b'local:commits',
        b'data': templatefilters.json(commits),
    }
    callconduit(basectx.repo().ui, b'differential.setdiffproperty', params)
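# For illustration, the 'hg:meta' property written above is a JSON blob along
# these lines (hashes, date and user are hypothetical):
#
#   {"branch": "default",
#    "date": "1692000000 0",
#    "node": "3f2e...(40 hex)...",
#    "parent": "a1b2...(40 hex)...",
#    "user": "Jane Doe <jdoe@example.com>"}
#
# 'local:commits' carries one such record per posted commit, which is what
# allows readers of the Revision (e.g. `hg phabread`) to restore the original
# commit metadata.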


def createdifferentialrevision(
    ctxs,
    revid=None,
    parentrevphid=None,
    oldbasenode=None,
    oldnode=None,
    olddiff=None,
    actions=None,
    comment=None,
):
    """create or update a Differential Revision

    If revid is None, create a new Differential Revision, otherwise update
    revid. If parentrevphid is not None, set it as a dependency.

    If there is a single commit for the new Differential Revision, ``ctxs`` will
    be a list of that single context. Otherwise, it is a list that covers the
    range of changes for the differential, where ``ctxs[0]`` is the first change
    to include and ``ctxs[-1]`` is the last.

    If oldnode is not None, check if the patch content (without commit message
    and metadata) has changed before creating another diff. For a Revision with
    a single commit, ``oldbasenode`` and ``oldnode`` have the same value. For a
    Revision covering multiple commits, ``oldbasenode`` corresponds to
    ``ctxs[0]`` the previous time this Revision was posted, and ``oldnode``
    corresponds to ``ctxs[-1]``.

    If actions is not None, they will be appended to the transaction.
    """
    ctx = ctxs[-1]
    basectx = ctxs[0]

    repo = ctx.repo()
    if oldnode:
        diffopts = mdiff.diffopts(git=True, context=32767)
        unfi = repo.unfiltered()
        oldctx = unfi[oldnode]
        oldbasectx = unfi[oldbasenode]
        neednewdiff = getdiff(basectx, ctx, diffopts) != getdiff(
            oldbasectx, oldctx, diffopts
        )
    else:
        neednewdiff = True

    transactions = []
    if neednewdiff:
        diff = creatediff(basectx, ctx)
        transactions.append({b'type': b'update', b'value': diff[b'phid']})
        if comment:
            transactions.append({b'type': b'comment', b'value': comment})
    else:
        # Even if we don't need to upload a new diff because the patch content
        # does not change, we might still need to update its metadata so
        # pushers could know the correct node metadata.
        assert olddiff
        diff = olddiff
        writediffproperties(ctxs, diff)

    # Set the parent Revision every time, so commit re-ordering is picked up
    if parentrevphid:
        transactions.append(
            {b'type': b'parents.set', b'value': [parentrevphid]}
        )

    if actions:
        transactions += actions

    # When folding multiple local commits into a single review, arcanist will
    # take the summary line of the first commit as the title, and then
    # concatenate the rest of the remaining messages (including each of their
    # first lines) to the rest of the first commit message (each separated by
    # an empty line), and use that as the summary field. Do the same here.
    # For commits with only a one-line message, there is no summary field, as
    # this gets assigned to the title.
    fields = util.sortdict()  # sorted for stable wire protocol in tests

    for i, _ctx in enumerate(ctxs):
        # Parse commit message and update related fields.
        desc = _ctx.description()
        info = callconduit(
            repo.ui, b'differential.parsecommitmessage', {b'corpus': desc}
        )

        for k in [b'title', b'summary', b'testPlan']:
            v = info[b'fields'].get(k)
            if not v:
                continue

            if i == 0:
                # Title, summary and test plan (if present) are taken verbatim
                # for the first commit.
                fields[k] = v.rstrip()
                continue
            elif k == b'title':
                # Add subsequent titles (i.e. the first line of the commit
                # message) back to the summary.
                k = b'summary'

            # Append any current field to the existing composite field
            fields[k] = b'\n\n'.join(filter(None, [fields.get(k), v.rstrip()]))

    for k, v in fields.items():
        transactions.append({b'type': k, b'value': v})

    params = {b'transactions': transactions}
    if revid is not None:
        # Update an existing Differential Revision
        params[b'objectIdentifier'] = revid

    revision = callconduit(repo.ui, b'differential.revision.edit', params)
    if not revision:
        if len(ctxs) == 1:
            msg = _(b'cannot create revision for %s') % ctx
        else:
            msg = _(b'cannot create revision for %s::%s') % (basectx, ctx)
        raise error.Abort(msg)

    return revision, diff
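# For illustration (PHIDs hypothetical), the `transactions` payload for a
# freshly created Revision might look like:
#
#   [{"type": "update", "value": "PHID-DIFF-..."},
#    {"type": "parents.set", "value": ["PHID-DREV-..."]},
#    {"type": "title", "value": "my change"},
#    {"type": "summary", "value": "longer description"}]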


def userphids(ui, names):
    """convert user names to PHIDs"""
    names = [name.lower() for name in names]
    query = {b'constraints': {b'usernames': names}}
    result = callconduit(ui, b'user.search', query)
    # A username that is not found is not an error of the API, so check if we
    # have missed some names here.
    data = result[b'data']
    resolved = {entry[b'fields'][b'username'].lower() for entry in data}
    unresolved = set(names) - resolved
    if unresolved:
        raise error.Abort(
            _(b'unknown username: %s') % b' '.join(sorted(unresolved))
        )
    return [entry[b'phid'] for entry in data]
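# Sketch of the round trip (names and PHIDs hypothetical):
#
#   userphids(ui, [b'Alice', b'bob'])
#   -> user.search with constraints {"usernames": ["alice", "bob"]}
#   -> [b'PHID-USER-aaaa...', b'PHID-USER-bbbb...']
#
# Both sides are lower-cased, so detecting unresolved names is
# case-insensitive.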


def _print_phabsend_action(ui, ctx, newrevid, action):
    """print the ``action`` that occurred when posting ``ctx`` for review

    This is a utility function for the sending phase of ``phabsend``, which
    makes it easier to show a status for all local commits with ``--fold``.
    """
    actiondesc = ui.label(
        {
            b'created': _(b'created'),
            b'skipped': _(b'skipped'),
            b'updated': _(b'updated'),
        }[action],
        b'phabricator.action.%s' % action,
    )
    drevdesc = ui.label(b'D%d' % newrevid, b'phabricator.drev')
    summary = cmdutil.format_changeset_summary(ui, ctx, b'phabsend')
    ui.write(_(b'%s - %s - %s\n') % (drevdesc, actiondesc, summary))


def _amend_diff_properties(unfi, drevid, newnodes, diff):
    """update the local commit list for the ``diff`` associated with ``drevid``

    This is a utility function for the amend phase of ``phabsend``, which
    converts failures to warning messages.
    """
    _debug(
        unfi.ui,
        b"new commits: %s\n" % stringutil.pprint([short(n) for n in newnodes]),
    )

    try:
        writediffproperties([unfi[newnode] for newnode in newnodes], diff)
    except util.urlerr.urlerror:
        # If it fails just warn and keep going, otherwise the DREV
        # associations will be lost
        unfi.ui.warnnoi18n(b'Failed to update metadata for D%d\n' % drevid)


1303 @vcrcommand(
1302 @vcrcommand(
1304 b'phabsend',
1303 b'phabsend',
1305 [
1304 [
1306 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
1305 (b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
1307 (b'', b'amend', True, _(b'update commit messages')),
1306 (b'', b'amend', True, _(b'update commit messages')),
1308 (b'', b'reviewer', [], _(b'specify reviewers')),
1307 (b'', b'reviewer', [], _(b'specify reviewers')),
1309 (b'', b'blocker', [], _(b'specify blocking reviewers')),
1308 (b'', b'blocker', [], _(b'specify blocking reviewers')),
1310 (
1309 (
1311 b'm',
1310 b'm',
1312 b'comment',
1311 b'comment',
1313 b'',
1312 b'',
1314 _(b'add a comment to Revisions with new/updated Diffs'),
1313 _(b'add a comment to Revisions with new/updated Diffs'),
1315 ),
1314 ),
1316 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
1315 (b'', b'confirm', None, _(b'ask for confirmation before sending')),
1317 (b'', b'fold', False, _(b'combine the revisions into one review')),
1316 (b'', b'fold', False, _(b'combine the revisions into one review')),
1318 ],
1317 ],
1319 _(b'REV [OPTIONS]'),
1318 _(b'REV [OPTIONS]'),
1320 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1319 helpcategory=command.CATEGORY_IMPORT_EXPORT,
1321 )
1320 )
def phabsend(ui, repo, *revs, **opts):
    """upload changesets to Phabricator

    If multiple revisions are specified, they will be sent as a stack with a
    linear dependency relationship, using the order specified by the revset.

    The first time changesets are uploaded, local tags will be created to
    maintain the association. After the first time, phabsend will check
    obsstore and tags information so it can figure out whether to update an
    existing Differential Revision, or create a new one.

    If --amend is set, update commit messages so they have the
    ``Differential Revision`` URL, and remove related tags. This is similar
    to what arcanist will do, and is preferred in author-push workflows.
    Otherwise, use local tags to record the ``Differential Revision``
    association.

    The --confirm option lets you confirm changesets before sending them. You
    can also add the following to your configuration file to make it the
    default behaviour::

        [phabsend]
        confirm = true

    By default, a separate review will be created for each commit that is
    selected, and will have the same parent/child relationship in Phabricator.
    If ``--fold`` is set, multiple commits are rolled up into a single review
    as if diffed from the parent of the first revision to the last. The commit
    messages are concatenated in the summary field on Phabricator.

    phabsend will check obsstore and the above association to decide whether
    to update an existing Differential Revision, or create a new one.
    """
    opts = pycompat.byteskwargs(opts)
    revs = list(revs) + opts.get(b'rev', [])
    revs = logcmdutil.revrange(repo, revs)
    revs.sort()  # ascending order to preserve topological parent/child in phab

    if not revs:
        raise error.Abort(_(b'phabsend requires at least one changeset'))
    if opts.get(b'amend'):
        cmdutil.checkunfinished(repo)

    ctxs = [repo[rev] for rev in revs]

    if any(c for c in ctxs if c.obsolete()):
        raise error.Abort(_(b"obsolete commits cannot be posted for review"))

    # Ensure the local commits are an unbroken range. The semantics of the
    # --fold option implies this, and the auto restacking of orphans requires
    # it. Otherwise A+C in A->B->C will cause B to be orphaned, and C' to
    # get A' as a parent.
    def _fail_nonlinear_revs(revs, revtype):
        badnodes = [repo[r].node() for r in revs]
        raise error.Abort(
            _(b"cannot phabsend multiple %s revisions: %s")
            % (revtype, scmutil.nodesummaries(repo, badnodes)),
            hint=_(b"the revisions must form a linear chain"),
        )

    heads = repo.revs(b'heads(%ld)', revs)
    if len(heads) > 1:
        _fail_nonlinear_revs(heads, b"head")

    roots = repo.revs(b'roots(%ld)', revs)
    if len(roots) > 1:
        _fail_nonlinear_revs(roots, b"root")

    fold = opts.get(b'fold')
    if fold:
        if len(revs) == 1:
            # TODO: just switch to --no-fold instead?
            raise error.Abort(_(b"cannot fold a single revision"))

        # There's no clear way to manage multiple commits with a Dxxx tag, so
        # require the amend option. (We could append "_nnn", but then it
        # becomes jumbled if earlier commits are added to an update.) It should
        # lock the repo and ensure that the range is editable, but that would
        # make the code pretty convoluted. The default behavior of `arc` is to
        # create a new review anyway.
        if not opts.get(b"amend"):
            raise error.Abort(_(b"cannot fold with --no-amend"))

        # It might be possible to bucketize the revisions by the DREV value,
        # and iterate over those groups when posting, and then again when
        # amending. But for simplicity, require all selected revisions to be
        # for the same DREV (if present). Adding local revisions to an
        # existing DREV is acceptable.
        drevmatchers = [
            _differentialrevisiondescre.search(ctx.description())
            for ctx in ctxs
        ]
        if len({m.group('url') for m in drevmatchers if m}) > 1:
            raise error.Abort(
                _(b"cannot fold revisions with different DREV values")
            )

    # {newnode: (oldnode, olddiff, olddrev)}
    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])

    confirm = ui.configbool(b'phabsend', b'confirm')
    confirm |= bool(opts.get(b'confirm'))
    if confirm:
        confirmed = _confirmbeforesend(repo, revs, oldmap)
        if not confirmed:
            raise error.Abort(_(b'phabsend cancelled'))

    actions = []
    reviewers = opts.get(b'reviewer', [])
    blockers = opts.get(b'blocker', [])
    phids = []
    if reviewers:
        phids.extend(userphids(repo.ui, reviewers))
    if blockers:
        phids.extend(
            map(
                lambda phid: b'blocking(%s)' % phid,
                userphids(repo.ui, blockers),
            )
        )
    if phids:
        actions.append({b'type': b'reviewers.add', b'value': phids})

    drevids = []  # [int]
    diffmap = {}  # {newnode: diff}

    # Send patches one by one so we know their Differential Revision PHIDs and
    # can provide dependency relationship
    lastrevphid = None
    for ctx in ctxs:
        if fold:
            ui.debug(b'sending rev %d::%d\n' % (ctx.rev(), ctxs[-1].rev()))
        else:
            ui.debug(b'sending rev %d\n' % ctx.rev())

        # Get Differential Revision ID
        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
        oldbasenode, oldbasediff, oldbaserevid = oldnode, olddiff, revid

        if fold:
            oldbasenode, oldbasediff, oldbaserevid = oldmap.get(
                ctxs[-1].node(), (None, None, None)
            )

        if oldnode != ctx.node() or opts.get(b'amend'):
            # Create or update Differential Revision
            revision, diff = createdifferentialrevision(
                ctxs if fold else [ctx],
                revid,
                lastrevphid,
                oldbasenode,
                oldnode,
                olddiff,
                actions,
                opts.get(b'comment'),
            )

            if fold:
                for ctx in ctxs:
                    diffmap[ctx.node()] = diff
            else:
                diffmap[ctx.node()] = diff

            newrevid = int(revision[b'object'][b'id'])
            newrevphid = revision[b'object'][b'phid']
            if revid:
                action = b'updated'
            else:
                action = b'created'

            # Create a local tag to note the association, if commit message
            # does not have it already
            if not fold:
                m = _differentialrevisiondescre.search(ctx.description())
                if not m or int(m.group('id')) != newrevid:
                    tagname = b'D%d' % newrevid
                    tags.tag(
                        repo,
                        tagname,
                        ctx.node(),
                        message=None,
                        user=None,
                        date=None,
                        local=True,
                    )
        else:
            # Nothing changed. But still set "newrevphid" so the next revision
            # could depend on this one and "newrevid" for the summary line.
            newrevphid = querydrev(repo.ui, b'%d' % revid)[0][b'phid']
            newrevid = revid
            action = b'skipped'

        drevids.append(newrevid)
        lastrevphid = newrevphid

        if fold:
            for c in ctxs:
                if oldmap.get(c.node(), (None, None, None))[2]:
                    action = b'updated'
                else:
                    action = b'created'
                _print_phabsend_action(ui, c, newrevid, action)
            break

        _print_phabsend_action(ui, ctx, newrevid, action)

    # Update commit messages and remove tags
    if opts.get(b'amend'):
        unfi = repo.unfiltered()
        drevs = callconduit(ui, b'differential.query', {b'ids': drevids})
        with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
            # Eagerly evaluate commits to restabilize before creating new
            # commits. The selected revisions are excluded because they are
            # automatically restacked as part of the submission process.
            restack = [
                c
                for c in repo.set(
                    b"(%ld::) - (%ld) - unstable() - obsolete() - public()",
                    revs,
                    revs,
                )
            ]
            wnode = unfi[b'.'].node()
            mapping = {}  # {oldnode: [newnode]}
            newnodes = []

            drevid = drevids[0]

            for i, rev in enumerate(revs):
                old = unfi[rev]
                if not fold:
                    drevid = drevids[i]
                drev = [d for d in drevs if int(d[b'id']) == drevid][0]

                newdesc = get_amended_desc(drev, old, fold)
                # Make sure the commit message contains "Differential Revision"
                if (
                    old.description() != newdesc
                    or old.p1().node() in mapping
                    or old.p2().node() in mapping
                ):
                    if old.phase() == phases.public:
                        ui.warn(
                            _(b"warning: not updating public commit %s\n")
                            % scmutil.formatchangeid(old)
                        )
                        continue
                    parents = [
                        mapping.get(old.p1().node(), (old.p1(),))[0],
                        mapping.get(old.p2().node(), (old.p2(),))[0],
                    ]
                    newdesc = rewriteutil.update_hash_refs(
                        repo,
                        newdesc,
                        mapping,
                    )
                    new = context.metadataonlyctx(
                        repo,
                        old,
                        parents=parents,
                        text=newdesc,
                        user=old.user(),
                        date=old.date(),
                        extra=old.extra(),
                    )

                    newnode = new.commit()

                    mapping[old.node()] = [newnode]

                    if fold:
                        # Defer updating the (single) Diff until all nodes are
                        # collected. No tags were created, so none need to be
                        # removed.
                        newnodes.append(newnode)
                        continue

                    _amend_diff_properties(
                        unfi, drevid, [newnode], diffmap[old.node()]
                    )

                    # Remove the local tag since it's no longer necessary
                    tagname = b'D%d' % drevid
                    if tagname in repo.tags():
                        tags.tag(
                            repo,
                            tagname,
                            repo.nullid,
                            message=None,
                            user=None,
                            date=None,
                            local=True,
                        )
                elif fold:
                    # When folding multiple commits into one review with
                    # --fold, track even the commits that weren't amended, so
                    # that their association isn't lost if the properties are
                    # rewritten below.
                    newnodes.append(old.node())

            # If the submitted commits are public, no amend takes place so
            # there are no newnodes and therefore no diff update to do.
            if fold and newnodes:
                diff = diffmap[old.node()]

                # The diff object in diffmap doesn't have the local commits
                # because that could be returned from differential.creatediff,
                # not differential.querydiffs. So use the queried diff (if
                # present), or force the amend (a new revision is being posted.)
                if not olddiff or set(newnodes) != getlocalcommits(olddiff):
                    _debug(ui, b"updating local commit list for D%d\n" % drevid)
                    _amend_diff_properties(unfi, drevid, newnodes, diff)
                else:
                    _debug(
                        ui,
                        b"local commit list for D%d is already up-to-date\n"
                        % drevid,
                    )
            elif fold:
                _debug(ui, b"no newnodes to update\n")

            # Restack any children of first-time submissions that were orphaned
            # in the process. The ctx won't report that it is an orphan until
            # the cleanup takes place below.
            for old in restack:
                parents = [
                    mapping.get(old.p1().node(), (old.p1(),))[0],
                    mapping.get(old.p2().node(), (old.p2(),))[0],
                ]
                new = context.metadataonlyctx(
                    repo,
                    old,
                    parents=parents,
                    text=rewriteutil.update_hash_refs(
                        repo, old.description(), mapping
                    ),
                    user=old.user(),
                    date=old.date(),
                    extra=old.extra(),
                )

                newnode = new.commit()

                # Don't obsolete unselected descendants of nodes that have not
                # been changed in this transaction - that results in an error.
                if newnode != old.node():
                    mapping[old.node()] = [newnode]
                    _debug(
                        ui,
                        b"restabilizing %s as %s\n"
                        % (short(old.node()), short(newnode)),
                    )
                else:
                    _debug(
                        ui,
                        b"not restabilizing unchanged %s\n" % short(old.node()),
                    )

            scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
            if wnode in mapping:
                unfi.setparents(mapping[wnode][0])
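
# A hedged illustration (not part of the original module): typical ways the
# command above might be invoked, using only flags registered in its
# @vcrcommand table; the revsets are examples, not prescriptions.
#
#   hg phabsend -r .                    # post the working-directory parent
#   hg phabsend -r '.^::.' --fold       # roll two commits into one review
#   hg phabsend -r . --reviewer alice --confirm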


# Map from "hg:meta" keys to header understood by "hg import". The order is
# consistent with "hg export" output.
_metanamemap = util.sortdict(
    [
        (b'user', b'User'),
        (b'date', b'Date'),
        (b'branch', b'Branch'),
        (b'node', b'Node ID'),
        (b'parent', b'Parent '),
    ]
)


def _confirmbeforesend(repo, revs, oldmap):
    url, token = readurltoken(repo.ui)
    ui = repo.ui
    for rev in revs:
        ctx = repo[rev]
        oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
        if drevid:
            drevdesc = ui.label(b'D%d' % drevid, b'phabricator.drev')
        else:
            drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')

        ui.write(
            _(b'%s - %s\n')
            % (
                drevdesc,
                cmdutil.format_changeset_summary(ui, ctx, b'phabsend'),
            )
        )

    if ui.promptchoice(
        _(b'Send the above changes to %s (Y/n)?$$ &Yes $$ &No') % url
    ):
        return False

    return True


_knownstatusnames = {
    b'accepted',
    b'needsreview',
    b'needsrevision',
    b'closed',
    b'abandoned',
    b'changesplanned',
}


def _getstatusname(drev):
    """get normalized status name from a Differential Revision"""
    return drev[b'statusName'].replace(b' ', b'').lower()
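
# Illustration (a sketch, not part of the original module): the normalization
# above maps Phabricator display names onto _knownstatusnames members, e.g.
#
#   _getstatusname({b'statusName': b'Needs Review'})     -> b'needsreview'
#   _getstatusname({b'statusName': b'Changes Planned'})  -> b'changesplanned'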


# Small language to specify differential revisions. Supported symbols: (), :X,
# +, and -.

_elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    b'(': (12, None, (b'group', 1, b')'), None, None),
    b':': (8, None, (b'ancestors', 8), None, None),
    b'&': (5, None, None, (b'and_', 5), None),
    b'+': (4, None, None, (b'add', 4), None),
    b'-': (4, None, None, (b'sub', 4), None),
    b')': (0, None, None, None, None),
    b'symbol': (0, b'symbol', None, None, None),
    b'end': (0, None, None, None, None),
}


def _tokenize(text):
    view = memoryview(text)  # zero-copy slice
    special = b'():+-& '
    pos = 0
    length = len(text)
    while pos < length:
        symbol = b''.join(
            itertools.takewhile(
                lambda ch: ch not in special, pycompat.iterbytestr(view[pos:])
            )
        )
        if symbol:
            yield (b'symbol', symbol, pos)
            pos += len(symbol)
        else:  # special char, ignore space
            if text[pos : pos + 1] != b' ':
                yield (text[pos : pos + 1], None, pos)
            pos += 1
    yield (b'end', None, pos)
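
# A minimal sketch of the tokenizer's output (illustrative, not part of the
# original module); the third element of each token is its byte offset:
#
#   >>> list(_tokenize(b':D6+8'))
#   [(b':', None, 0), (b'symbol', b'D6', 1), (b'+', None, 3),
#    (b'symbol', b'8', 4), (b'end', None, 5)]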


def _parse(text):
    tree, pos = parser.parser(_elements).parse(_tokenize(text))
    if pos != len(text):
        raise error.ParseError(b'invalid token', pos)
    return tree
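
# Continuing the sketch above (illustrative): per the _elements table, the
# prefix b':' (binding 8) binds tighter than the infix b'+' (binding 4), so
#
#   >>> _parse(b':D6+8')
#   (b'add', (b'ancestors', (b'symbol', b'D6')), (b'symbol', b'8'))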


def _parsedrev(symbol):
    """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
    if symbol.startswith(b'D') and symbol[1:].isdigit():
        return int(symbol[1:])
    if symbol.isdigit():
        return int(symbol)


def _prefetchdrevs(tree):
    """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
    drevs = set()
    ancestordrevs = set()
    op = tree[0]
    if op == b'symbol':
        r = _parsedrev(tree[1])
        if r:
            drevs.add(r)
    elif op == b'ancestors':
        r, a = _prefetchdrevs(tree[1])
        drevs.update(r)
        ancestordrevs.update(r)
        ancestordrevs.update(a)
    else:
        for t in tree[1:]:
            r, a = _prefetchdrevs(t)
            drevs.update(r)
            ancestordrevs.update(a)
    return drevs, ancestordrevs
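
# Walking the example tree from the sketch above (illustrative): for
# _parse(b':D6+8') this returns ({6, 8}, {6}) -- both D6 and D8 are fetched
# individually, and D6 additionally seeds an ancestor-stack prefetch.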


def querydrev(ui, spec):
    """return a list of "Differential Revision" dicts

    spec is a string using a simple query language, see docstring in phabread
    for details.

    A "Differential Revision dict" looks like:

        {
            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
            "auxiliary": {
                "phabricator:depends-on": [
                    "PHID-DREV-gbapp366kutjebt7agcd"
                ],
                "phabricator:projects": [],
            },
            "branch": "default",
            "ccs": [],
            "commits": [],
            "dateCreated": "1499181406",
            "dateModified": "1499182103",
            "diffs": [
                "3",
                "4",
            ],
            "hashes": [],
            "id": "2",
            "lineCount": "2",
            "phid": "PHID-DREV-672qvysjcczopag46qty",
            "properties": {},
            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
            "reviewers": [],
            "sourcePath": null,
            "status": "0",
            "statusName": "Needs Review",
            "summary": "",
            "testPlan": "",
            "title": "example",
            "uri": "https://phab.example.com/D2",
        }
    """
    # TODO: replace differential.query and differential.querydiffs with
    # differential.diff.search because the former (and their output) are
    # frozen, and planned to be deprecated and removed.

    def fetch(params):
        """params -> single drev or None"""
        key = (params.get(b'ids') or params.get(b'phids') or [None])[0]
        if key in prefetched:
            return prefetched[key]
        drevs = callconduit(ui, b'differential.query', params)
        # Fill prefetched with the result
        for drev in drevs:
            prefetched[drev[b'phid']] = drev
            prefetched[int(drev[b'id'])] = drev
        if key not in prefetched:
            raise error.Abort(
                _(b'cannot get Differential Revision %r') % params
            )
        return prefetched[key]

    def getstack(topdrevids):
        """given a top, get a stack from the bottom, [id] -> [id]"""
        visited = set()
        result = []
        queue = [{b'ids': [i]} for i in topdrevids]
        while queue:
            params = queue.pop()
            drev = fetch(params)
            if drev[b'id'] in visited:
                continue
            visited.add(drev[b'id'])
            result.append(int(drev[b'id']))
            auxiliary = drev.get(b'auxiliary', {})
            depends = auxiliary.get(b'phabricator:depends-on', [])
            for phid in depends:
                queue.append({b'phids': [phid]})
        result.reverse()
        return smartset.baseset(result)

    # Initialize prefetch cache
    prefetched = {}  # {id or phid: drev}

    tree = _parse(spec)
    drevs, ancestordrevs = _prefetchdrevs(tree)

    # developer config: phabricator.batchsize
    batchsize = ui.configint(b'phabricator', b'batchsize')

    # Prefetch Differential Revisions in batch
    tofetch = set(drevs)
    for r in ancestordrevs:
        tofetch.update(range(max(1, r - batchsize), r + 1))
    if drevs:
        fetch({b'ids': list(tofetch)})
    validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))

    # Walk through the tree, return smartsets
    def walk(tree):
        op = tree[0]
        if op == b'symbol':
            drev = _parsedrev(tree[1])
            if drev:
                return smartset.baseset([drev])
            elif tree[1] in _knownstatusnames:
                drevs = [
                    r
                    for r in validids
                    if _getstatusname(prefetched[r]) == tree[1]
                ]
                return smartset.baseset(drevs)
            else:
                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
        elif op in {b'and_', b'add', b'sub'}:
            assert len(tree) == 3
            return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
        elif op == b'group':
            return walk(tree[1])
        elif op == b'ancestors':
            return getstack(walk(tree[1]))
        else:
            raise error.ProgrammingError(b'illegal tree: %r' % tree)

    return [prefetched[r] for r in walk(tree)]
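
# Putting the pieces together (an illustrative sketch): with the grammar
# above, a call such as
#
#   querydrev(ui, b':D6+8-(2+D4)')
#
# returns the dicts for the stack up to D6, plus D8, minus D2 and D4 -- the
# same example documented in the phabread docstring below.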


def getdescfromdrev(drev):
    """get description (commit message) from "Differential Revision"

    This is similar to the differential.getcommitmessage API. But we only care
    about limited fields: title, summary, test plan, and URL.
    """
    title = drev[b'title']
    summary = drev[b'summary'].rstrip()
    testplan = drev[b'testPlan'].rstrip()
    if testplan:
        testplan = b'Test Plan:\n%s' % testplan
    uri = b'Differential Revision: %s' % drev[b'uri']
    return b'\n\n'.join(filter(None, [title, summary, testplan, uri]))
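
# Shape of the assembled message (illustrative; the field values are made
# up). Empty fields are dropped by filter(None, ...), so a drev without a
# summary or test plan collapses to just the title and the URI:
#
#   example title
#
#   a summary paragraph
#
#   Test Plan:
#   ran the tests
#
#   Differential Revision: https://phab.example.com/D2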


def get_amended_desc(drev, ctx, folded):
    """similar to ``getdescfromdrev``, but supports a folded series of commits

    This is used when determining if an individual commit needs to have its
    message amended after posting it for review. The determination is made for
    each individual commit, even when they were folded into one review.
    """
    if not folded:
        return getdescfromdrev(drev)

    uri = b'Differential Revision: %s' % drev[b'uri']

    # Since the commit messages were combined when posting multiple commits
    # with --fold, the fields can't be read from Phabricator here, or *all*
    # affected local revisions will end up with the same commit message after
    # the URI is amended in. Append in the DREV line, or update it if it
    # exists. At worst, this means commit message or test plan updates on
    # Phabricator aren't propagated back to the repository, but that seems
    # reasonable for the case where local commits are effectively combined
    # in Phabricator.
    m = _differentialrevisiondescre.search(ctx.description())
    if not m:
        return b'\n\n'.join([ctx.description(), uri])

    return _differentialrevisiondescre.sub(uri, ctx.description())


def getlocalcommits(diff):
    """get the set of local commits from a diff object

    See ``getdiffmeta()`` for an example diff object.
    """
    props = diff.get(b'properties') or {}
    commits = props.get(b'local:commits') or {}
    if len(commits) > 1:
        return {bin(c) for c in commits.keys()}

    # Storing the diff metadata predates storing `local:commits`, so continue
    # to use that in the --no-fold case.
    return {bin(getdiffmeta(diff).get(b'node', b'')) or None}


def getdiffmeta(diff):
    """get commit metadata (date, node, user, p1) from a diff object

    The metadata could be "hg:meta", sent by phabsend, like:

        "properties": {
            "hg:meta": {
                "branch": "default",
                "date": "1499571514 25200",
                "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
                "user": "Foo Bar <foo@example.com>",
                "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
            }
        }

    Or converted from "local:commits", sent by "arc", like:

        "properties": {
            "local:commits": {
                "98c08acae292b2faf60a279b4189beb6cff1414d": {
                    "author": "Foo Bar",
                    "authorEmail": "foo@example.com",
                    "branch": "default",
                    "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
                    "local": "1000",
                    "message": "...",
                    "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
                    "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
                    "summary": "...",
                    "tag": "",
                    "time": 1499546314,
                }
            }
        }

    Note: metadata extracted from "local:commits" will lose time zone
    information.
    """
    props = diff.get(b'properties') or {}
    meta = props.get(b'hg:meta')
    if not meta:
        if props.get(b'local:commits'):
            commit = sorted(props[b'local:commits'].values())[0]
            meta = {}
            if b'author' in commit and b'authorEmail' in commit:
                meta[b'user'] = b'%s <%s>' % (
                    commit[b'author'],
                    commit[b'authorEmail'],
                )
            if b'time' in commit:
                meta[b'date'] = b'%d 0' % int(commit[b'time'])
            if b'branch' in commit:
                meta[b'branch'] = commit[b'branch']
            node = commit.get(b'commit', commit.get(b'rev'))
            if node:
                meta[b'node'] = node
            if len(commit.get(b'parents', ())) >= 1:
                meta[b'parent'] = commit[b'parents'][0]
        else:
            meta = {}
    if b'date' not in meta and b'dateCreated' in diff:
        meta[b'date'] = b'%s 0' % diff[b'dateCreated']
    if b'branch' not in meta and diff.get(b'branch'):
        meta[b'branch'] = diff[b'branch']
    if b'parent' not in meta and diff.get(b'sourceControlBaseRevision'):
        meta[b'parent'] = diff[b'sourceControlBaseRevision']
    return meta
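
# An illustrative trace (not from the original module), using the
# "local:commits" sample in the docstring above; the fallback branch would
# build roughly
#
#   {
#       b'user': b'Foo Bar <foo@example.com>',
#       b'date': b'1499546314 0',  # note the zeroed time zone offset
#       b'branch': b'default',
#       b'node': b'98c08acae292b2faf60a279b4189beb6cff1414d',
#       b'parent': b'6d0abad76b30e4724a37ab8721d630394070fe16',
#   }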


def _getdrevs(ui, stack, specs):
    """convert user-supplied DREVSPECs into "Differential Revision" dicts

    See ``hg help phabread`` for how to specify each DREVSPEC.
    """
    if len(specs) > 0:

        def _formatspec(s):
            if stack:
                s = b':(%s)' % s
            return b'(%s)' % s

        spec = b'+'.join(pycompat.maplist(_formatspec, specs))

        drevs = querydrev(ui, spec)
        if drevs:
            return drevs

    raise error.Abort(_(b"empty DREVSPEC set"))
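
# Sketch of the spec-building above (illustrative): with stack=True and
# specs=[b'D6', b'8'], each spec is wrapped and joined into
#
#   b'(:(D6))+(:(8))'
#
# i.e. the union of the stacks up to D6 and up to D8.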


def readpatch(ui, drevs, write):
    """generate plain-text patch readable by 'hg import'

    write takes a list of (DREV, bytes), where DREV is the differential number
    (as bytes, without the "D" prefix) and the bytes are the text of a patch
    to be imported. drevs is what "querydrev" returns, results of
    "differential.query".
    """
    # Prefetch hg:meta property for all diffs
    diffids = sorted({max(int(v) for v in drev[b'diffs']) for drev in drevs})
    diffs = callconduit(ui, b'differential.querydiffs', {b'ids': diffids})

    patches = []

    # Generate patch for each drev
    for drev in drevs:
        ui.note(_(b'reading D%s\n') % drev[b'id'])

        diffid = max(int(v) for v in drev[b'diffs'])
        body = callconduit(ui, b'differential.getrawdiff', {b'diffID': diffid})
        desc = getdescfromdrev(drev)
        header = b'# HG changeset patch\n'

        # Try to preserve metadata from hg:meta property. Write hg patch
        # headers that can be read by the "import" command. See patchheadermap
        # and extract in mercurial/patch.py for supported headers.
        meta = getdiffmeta(diffs[b'%d' % diffid])
        for k in _metanamemap.keys():
            if k in meta:
                header += b'# %s %s\n' % (_metanamemap[k], meta[k])

        content = b'%s%s\n%s' % (header, desc, body)
        patches.append((drev[b'id'], content))

    # Write patches to the supplied callback
    write(patches)
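
# The header assembled above follows _metanamemap's order; a generated patch
# would look roughly like this (values are illustrative):
#
#   # HG changeset patch
#   # User Foo Bar <foo@example.com>
#   # Date 1499546314 0
#   # Branch default
#   # Node ID 98c08acae292b2faf60a279b4189beb6cff1414d
#   # Parent  6d0abad76b30e4724a37ab8721d630394070fe16
#   <commit message from getdescfromdrev>
#
#   <raw diff body>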


@vcrcommand(
    b'phabread',
    [(b'', b'stack', False, _(b'read dependencies'))],
    _(b'DREVSPEC... [OPTIONS]'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
    optionalrepo=True,
)
def phabread(ui, repo, *specs, **opts):
    """print patches from Phabricator suitable for importing

    DREVSPEC could be a Differential Revision identity, like ``D123``, or just
    the number ``123``. It could also have common operators like ``+``, ``-``,
    ``&``, ``(``, ``)`` for complex queries. The prefix ``:`` can be used to
    select a stack. If multiple DREVSPEC values are given, the result is the
    union of each individually evaluated value. No attempt is currently made
    to reorder the values to run from parent to child.

    ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
    could be used to filter patches by status. For performance reasons, they
    only represent a subset of non-status selections and cannot be used alone.

    For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8, and
    excludes D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions
    in a stack up to D9.

    If --stack is given, follow dependency information and read all patches.
    It is equivalent to the ``:`` operator.
    """
    opts = pycompat.byteskwargs(opts)
    drevs = _getdrevs(ui, opts.get(b'stack'), specs)

    def _write(patches):
        for drev, content in patches:
            ui.write(content)

    readpatch(ui, drevs, _write)
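
# Hedged usage sketch (not from the original module): since phabread writes
# plain patches to stdout, one plausible workflow is piping them into import,
#
#   hg phabread --stack D123 | hg import -
#
# though the phabimport command below applies the patches directly.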
2162
2161
2163
2162
2164 @vcrcommand(
2163 @vcrcommand(
2165 b'phabimport',
2164 b'phabimport',
2166 [(b'', b'stack', False, _(b'import dependencies as well'))],
    [(b'', b'stack', False, _(b'import dependencies as well'))],
    _(b'DREVSPEC... [OPTIONS]'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
)
def phabimport(ui, repo, *specs, **opts):
    """import patches from Phabricator for the specified Differential Revisions

    The patches are read and applied starting at the parent of the working
    directory.

    See ``hg help phabread`` for how to specify DREVSPEC.
    """
    opts = pycompat.byteskwargs(opts)

    # --bypass avoids losing exec and symlink bits when importing on Windows,
    # and allows importing with a dirty wdir. It also aborts instead of leaving
    # rejects.
    opts[b'bypass'] = True

    # Mandatory default values, synced with commands.import
    opts[b'strip'] = 1
    opts[b'prefix'] = b''
    # Evolve 9.3.0 assumes this key is present in cmdutil.tryimportone()
    opts[b'obsolete'] = False

    if ui.configbool(b'phabimport', b'secret'):
        opts[b'secret'] = True
    if ui.configbool(b'phabimport', b'obsolete'):
        opts[b'obsolete'] = True  # Handled by evolve wrapping tryimportone()

    def _write(patches):
        parents = repo[None].parents()

        with repo.wlock(), repo.lock(), repo.transaction(b'phabimport'):
            for drev, contents in patches:
                ui.status(_(b'applying patch from D%s\n') % drev)

                with patch.extract(ui, io.BytesIO(contents)) as patchdata:
                    msg, node, rej = cmdutil.tryimportone(
                        ui,
                        repo,
                        patchdata,
                        parents,
                        opts,
                        [],
                        None,  # Never update wdir to another revision
                    )

                    if not node:
                        raise error.Abort(_(b'D%s: no diffs found') % drev)

                    ui.note(msg + b'\n')
                    parents = [repo[node]]

    drevs = _getdrevs(ui, opts.get(b'stack'), specs)

    readpatch(repo.ui, drevs, _write)
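

# A minimal sketch (not part of the extension) of the callback contract:
# readpatch() hands _write() an iterable of (drev number, raw patch bytes)
# pairs, one per requested Differential Revision.
def _example_readpatch_callback(patches):
    return [(drev, len(contents)) for drev, contents in patches]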


@vcrcommand(
    b'phabupdate',
    [
        (b'', b'accept', False, _(b'accept revisions')),
        (b'', b'reject', False, _(b'reject revisions')),
        (b'', b'request-review', False, _(b'request review on revisions')),
        (b'', b'abandon', False, _(b'abandon revisions')),
        (b'', b'reclaim', False, _(b'reclaim revisions')),
        (b'', b'close', False, _(b'close revisions')),
        (b'', b'reopen', False, _(b'reopen revisions')),
        (b'', b'plan-changes', False, _(b'plan changes for revisions')),
        (b'', b'resign', False, _(b'resign as a reviewer from revisions')),
        (b'', b'commandeer', False, _(b'commandeer revisions')),
        (b'm', b'comment', b'', _(b'comment on the last revision')),
        (b'r', b'rev', b'', _(b'local revision to update'), _(b'REV')),
    ],
    _(b'[DREVSPEC...| -r REV...] [OPTIONS]'),
    helpcategory=command.CATEGORY_IMPORT_EXPORT,
    optionalrepo=True,
)
def phabupdate(ui, repo, *specs, **opts):
    """update Differential Revision in batch

    DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
    """
    opts = pycompat.byteskwargs(opts)
    transactions = [
        b'abandon',
        b'accept',
        b'close',
        b'commandeer',
        b'plan-changes',
        b'reclaim',
        b'reject',
        b'reopen',
        b'request-review',
        b'resign',
    ]
    flags = [n for n in transactions if opts.get(n.replace(b'-', b'_'))]
    if len(flags) > 1:
        raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))

    actions = []
    for f in flags:
        actions.append({b'type': f, b'value': True})

    revs = opts.get(b'rev')
    if revs:
        if not repo:
            raise error.InputError(_(b'--rev requires a repository'))

        if specs:
            raise error.InputError(_(b'cannot specify both DREVSPEC and --rev'))

        drevmap = getdrevmap(repo, logcmdutil.revrange(repo, [revs]))
        specs = []
        unknown = []
        for r, d in drevmap.items():
            if d is None:
                unknown.append(repo[r])
            else:
                specs.append(b'D%d' % d)
        if unknown:
            raise error.InputError(
                _(b'selected revisions without a Differential: %s')
                % scmutil.nodesummaries(repo, unknown)
            )

    drevs = _getdrevs(ui, opts.get(b'stack'), specs)
    for i, drev in enumerate(drevs):
        if i + 1 == len(drevs) and opts.get(b'comment'):
            actions.append({b'type': b'comment', b'value': opts[b'comment']})
        if actions:
            params = {
                b'objectIdentifier': drev[b'phid'],
                b'transactions': actions,
            }
            callconduit(ui, b'differential.revision.edit', params)
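

# A minimal sketch (not part of the extension) of the transaction list this
# command assembles: `hg phabupdate --accept -m done D123` would edit the
# revision with the two transactions below (the PHID is hypothetical).
def _example_phabupdate_params():
    actions = [
        {b'type': b'accept', b'value': True},
        {b'type': b'comment', b'value': b'done'},
    ]
    return {b'objectIdentifier': b'PHID-DREV-123', b'transactions': actions}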


@eh.templatekeyword(b'phabreview', requires={b'ctx'})
def template_review(context, mapping):
    """:phabreview: Object describing the review for this changeset.
    Has attributes `url` and `id`.
    """
    ctx = context.resource(mapping, b'ctx')
    m = _differentialrevisiondescre.search(ctx.description())
    if m:
        return templateutil.hybriddict(
            {
                b'url': m.group('url'),
                b'id': b"D%s" % m.group('id'),
            }
        )
    else:
        tags = ctx.repo().nodetags(ctx.node())
        for t in tags:
            if _differentialrevisiontagre.match(t):
                url = ctx.repo().ui.config(b'phabricator', b'url')
                if not url.endswith(b'/'):
                    url += b'/'
                url += t

                return templateutil.hybriddict(
                    {
                        b'url': url,
                        b'id': t,
                    }
                )
    return None
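

# A minimal sketch of what the description regex captures; the pattern below
# is a stand-in for _differentialrevisiondescre (defined earlier in this
# file) and the URL is hypothetical.
def _example_phabreview_match():
    import re

    pat = re.compile(
        br'^Differential Revision:\s*(?P<url>.*D(?P<id>[1-9][0-9]*))$',
        re.MULTILINE,
    )
    desc = b'fix a bug\n\nDifferential Revision: https://phab.example.com/D123'
    m = pat.search(desc)
    return m.group('url'), m.group('id')  # (b'https://...D123', b'123')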


@eh.templatekeyword(b'phabstatus', requires={b'ctx', b'repo', b'ui'})
def template_status(context, mapping):
    """:phabstatus: String. Status of Phabricator differential."""
    ctx = context.resource(mapping, b'ctx')
    repo = context.resource(mapping, b'repo')
    ui = context.resource(mapping, b'ui')

    rev = ctx.rev()
    try:
        drevid = getdrevmap(repo, [rev])[rev]
    except KeyError:
        return None
    drevs = callconduit(ui, b'differential.query', {b'ids': [drevid]})
    for drev in drevs:
        if int(drev[b'id']) == drevid:
            return templateutil.hybriddict(
                {
                    b'url': drev[b'uri'],
                    b'status': drev[b'statusName'],
                }
            )
    return None


@show.showview(b'phabstatus', csettopic=b'work')
def phabstatusshowview(ui, repo, displayer):
2363 """Phabricator differiential status"""
2362 """Phabricator differiential status"""
    revs = repo.revs('sort(_underway(), topo)')
    drevmap = getdrevmap(repo, revs)
    unknownrevs, drevids, revsbydrevid = [], set(), {}
    for rev, drevid in drevmap.items():
        if drevid is not None:
            drevids.add(drevid)
            revsbydrevid.setdefault(drevid, set()).add(rev)
        else:
            unknownrevs.append(rev)

    drevs = callconduit(ui, b'differential.query', {b'ids': list(drevids)})
    drevsbyrev = {}
    for drev in drevs:
        for rev in revsbydrevid[int(drev[b'id'])]:
            drevsbyrev[rev] = drev

    def phabstatus(ctx):
        drev = drevsbyrev[ctx.rev()]
        status = ui.label(
            b'%(statusName)s' % drev,
            b'phabricator.status.%s' % _getstatusname(drev),
        )
        ui.write(b"\n%s %s\n" % (drev[b'uri'], status))

    revs -= smartset.baseset(unknownrevs)
    revdag = graphmod.dagwalker(repo, revs)

    ui.setconfig(b'experimental', b'graphshorten', True)
    displayer._exthook = phabstatus
    nodelen = show.longestshortest(repo, revs)
    logcmdutil.displaygraph(
        ui,
        repo,
        revdag,
        displayer,
        graphmod.asciiedges,
        props={b'nodelen': nodelen},
    )
@@ -1,549 +1,548 @@
import collections
import errno
import mmap
import os
import struct
import time

from mercurial.i18n import _
from mercurial.pycompat import (
-    getattr,
    open,
)
from mercurial.node import hex
from mercurial import (
    policy,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil
from . import shallowutil

osutil = policy.importmod('osutil')

# The pack version supported by this implementation. This will need to be
# rev'd whenever the byte format changes. Ex: changing the fanout prefix,
# changing any of the int sizes, changing the delta algorithm, etc.
PACKVERSIONSIZE = 1
INDEXVERSIONSIZE = 2

FANOUTSTART = INDEXVERSIONSIZE

# Constant that indicates a fanout table entry hasn't been filled in. (This does
# not get serialized)
EMPTYFANOUT = -1

# The fanout prefix is the number of bytes that can be addressed by the fanout
# table. Example: a fanout prefix of 1 means we use the first byte of a hash to
# look in the fanout table (which will be 2^8 entries long).
SMALLFANOUTPREFIX = 1
LARGEFANOUTPREFIX = 2

# The number of entries in the index at which point we switch to a large fanout.
# It is chosen to balance the linear scan through a sparse fanout, with the
# size of the bisect in actual index.
# 2^16 / 8 was chosen because it trades off (1 step fanout scan + 5 step
# bisect) with (8 step fanout scan + 1 step bisect)
# 5 step bisect = log(2^16 / 8 / 255) # fanout
# 8 step fanout scan = 2^16 / (2^16 / 8) # fanout space divided by entries
SMALLFANOUTCUTOFF = 2 ** 16 // 8
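

# A minimal sketch of how a fanout prefix maps a node to a fanout slot, the
# same computation writeindex()/_fanouttable below perform (the node value
# here is hypothetical):
def _example_fanoutkey():
    node = b'\xab' + b'\x00' * 19  # a 20-byte sha1 node
    small = struct.unpack(b'!B', node[:SMALLFANOUTPREFIX])[0]  # slot 171 of 256
    large = struct.unpack(b'!H', node[:LARGEFANOUTPREFIX])[0]  # slot 43776 of 65536
    return small, large
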

# The amount of time to wait between checking for new packs. This prevents an
# exception when data is moved to a new pack after the process has already
# loaded the pack list.
REFRESHRATE = 0.1


class _cachebackedpacks:
    def __init__(self, packs, cachesize):
        self._packs = set(packs)
        self._lrucache = util.lrucachedict(cachesize)
        self._lastpack = None

        # Avoid cold start of the cache by populating the most recent packs
        # in the cache.
        for i in reversed(range(min(cachesize, len(packs)))):
            self._movetofront(packs[i])

    def _movetofront(self, pack):
        # This effectively makes pack the first entry in the cache.
        self._lrucache[pack] = True

    def _registerlastpackusage(self):
        if self._lastpack is not None:
            self._movetofront(self._lastpack)
            self._lastpack = None

    def add(self, pack):
        self._registerlastpackusage()

        # This method will mostly be called when packs are not in the cache,
        # so add the pack to the cache here.
        self._movetofront(pack)
        self._packs.add(pack)

    def __iter__(self):
        self._registerlastpackusage()

        # Cache iteration is based on LRU.
        for pack in self._lrucache:
            self._lastpack = pack
            yield pack

        cachedpacks = {pack for pack in self._lrucache}
        # Yield for paths not in the cache.
        for pack in self._packs - cachedpacks:
            self._lastpack = pack
            yield pack

        # Data not found in any pack.
        self._lastpack = None
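

# A minimal sketch (with bytestring stand-ins for real pack objects) of the
# iteration contract above: cached packs are yielded first, then the rest,
# and the last pack that satisfied a lookup is promoted on the next pass.
def _example_cachebackedpacks_order():
    packs = _cachebackedpacks([b'a', b'b', b'c'], cachesize=2)
    return list(packs)  # b'a' and b'b' come from the cache, then b'c'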


class basepackstore:
    # Default cache size limit for the pack files.
    DEFAULTCACHESIZE = 100

    def __init__(self, ui, path):
        self.ui = ui
        self.path = path

        # lastrefresh is 0 so we'll immediately check for new packs on the
        # first failure.
        self.lastrefresh = 0

        packs = []
        for filepath, __, __ in self._getavailablepackfilessorted():
            try:
                pack = self.getpack(filepath)
            except Exception as ex:
                # An exception may be thrown if the pack file is corrupted
                # somehow. Log a warning but keep going in this case, just
                # skipping this pack file.
                #
                # If this is an ENOENT error then don't even bother logging.
                # Someone could have removed the file since we retrieved the
                # list of paths.
                if getattr(ex, 'errno', None) != errno.ENOENT:
                    ui.warn(_(b'unable to load pack %s: %s\n') % (filepath, ex))
                continue
            packs.append(pack)

        self.packs = _cachebackedpacks(packs, self.DEFAULTCACHESIZE)

    def _getavailablepackfiles(self):
135 """For each pack file (a index/data file combo), yields:
134 """For each pack file (a index/data file combo), yields:
        (full path without extension, mtime, size)

        mtime will be the mtime of the index/data file (whichever is newer)
        size is the combined size of index/data file
        """
        indexsuffixlen = len(self.INDEXSUFFIX)
        packsuffixlen = len(self.PACKSUFFIX)

        ids = set()
        sizes = collections.defaultdict(lambda: 0)
        mtimes = collections.defaultdict(lambda: [])
        try:
            for filename, type, stat in osutil.listdir(self.path, stat=True):
                id = None
                if filename[-indexsuffixlen:] == self.INDEXSUFFIX:
                    id = filename[:-indexsuffixlen]
                elif filename[-packsuffixlen:] == self.PACKSUFFIX:
                    id = filename[:-packsuffixlen]

                # Since we expect to have two files corresponding to each ID
                # (the index file and the pack file), we can yield once we see
                # it twice.
                if id:
                    sizes[id] += stat.st_size  # Sum both files' sizes together
                    mtimes[id].append(stat.st_mtime)
                    if id in ids:
                        yield (
                            os.path.join(self.path, id),
                            max(mtimes[id]),
                            sizes[id],
                        )
                    else:
                        ids.add(id)
        except FileNotFoundError:
            pass

    def _getavailablepackfilessorted(self):
        """Like `_getavailablepackfiles`, but also sorts the files by mtime,
        yielding newest files first.

        This is desirable, since it is more likely newer packfiles have more
        desirable data.
        """
        files = []
        for path, mtime, size in self._getavailablepackfiles():
            files.append((mtime, size, path))
        files = sorted(files, reverse=True)
        for mtime, size, path in files:
            yield path, mtime, size

    def gettotalsizeandcount(self):
        """Returns the total disk size (in bytes) of all the pack files in
        this store, and the count of pack files.

        (This might be smaller than the total size of the ``self.path``
        directory, since this only considers fully-written pack files, and
        not temporary files or other detritus in the directory.)
193 """
192 """
194 totalsize = 0
193 totalsize = 0
195 count = 0
194 count = 0
196 for __, __, size in self._getavailablepackfiles():
195 for __, __, size in self._getavailablepackfiles():
197 totalsize += size
196 totalsize += size
198 count += 1
197 count += 1
199 return totalsize, count
198 return totalsize, count
200
199
201 def getmetrics(self):
200 def getmetrics(self):
202 """Returns metrics on the state of this store."""
201 """Returns metrics on the state of this store."""
203 size, count = self.gettotalsizeandcount()
202 size, count = self.gettotalsizeandcount()
204 return {
203 return {
205 b'numpacks': count,
204 b'numpacks': count,
206 b'totalpacksize': size,
205 b'totalpacksize': size,
207 }
206 }
208
207
209 def getpack(self, path):
208 def getpack(self, path):
210 raise NotImplementedError()
209 raise NotImplementedError()
211
210
212 def getmissing(self, keys):
211 def getmissing(self, keys):
213 missing = keys
212 missing = keys
214 for pack in self.packs:
213 for pack in self.packs:
215 missing = pack.getmissing(missing)
214 missing = pack.getmissing(missing)
216
215
217 # Ensures better performance of the cache by keeping the most
216 # Ensures better performance of the cache by keeping the most
218 # recently accessed pack at the beginning in subsequent iterations.
217 # recently accessed pack at the beginning in subsequent iterations.
219 if not missing:
218 if not missing:
220 return missing
219 return missing
221
220
222 if missing:
221 if missing:
223 for pack in self.refresh():
222 for pack in self.refresh():
224 missing = pack.getmissing(missing)
223 missing = pack.getmissing(missing)
225
224
226 return missing
225 return missing
227
226
228 def markledger(self, ledger, options=None):
227 def markledger(self, ledger, options=None):
229 for pack in self.packs:
228 for pack in self.packs:
230 pack.markledger(ledger)
229 pack.markledger(ledger)
231
230
232 def markforrefresh(self):
231 def markforrefresh(self):
233 """Tells the store that there may be new pack files, so the next time it
232 """Tells the store that there may be new pack files, so the next time it
234 has a lookup miss it should check for new files."""
233 has a lookup miss it should check for new files."""
235 self.lastrefresh = 0
234 self.lastrefresh = 0
236
235
237 def refresh(self):
236 def refresh(self):
238 """Checks for any new packs on disk, adds them to the main pack list,
237 """Checks for any new packs on disk, adds them to the main pack list,
239 and returns a list of just the new packs."""
238 and returns a list of just the new packs."""
240 now = time.time()
239 now = time.time()
241
240
242 # If we experience a lot of misses (like in the case of getmissing() on
241 # If we experience a lot of misses (like in the case of getmissing() on
243 # new objects), let's only actually check disk for new stuff every once
242 # new objects), let's only actually check disk for new stuff every once
244 # in a while. Generally this code path should only ever matter when a
243 # in a while. Generally this code path should only ever matter when a
245 # repack is going on in the background, and that should be pretty rare
244 # repack is going on in the background, and that should be pretty rare
246 # to have that happen twice in quick succession.
245 # to have that happen twice in quick succession.
247 newpacks = []
246 newpacks = []
248 if now > self.lastrefresh + REFRESHRATE:
247 if now > self.lastrefresh + REFRESHRATE:
249 self.lastrefresh = now
248 self.lastrefresh = now
250 previous = {p.path for p in self.packs}
249 previous = {p.path for p in self.packs}
251 for filepath, __, __ in self._getavailablepackfilessorted():
250 for filepath, __, __ in self._getavailablepackfilessorted():
252 if filepath not in previous:
251 if filepath not in previous:
253 newpack = self.getpack(filepath)
252 newpack = self.getpack(filepath)
254 newpacks.append(newpack)
253 newpacks.append(newpack)
255 self.packs.add(newpack)
254 self.packs.add(newpack)
256
255
257 return newpacks
256 return newpacks
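

# A minimal sketch (with stand-in packs) of the getmissing() cascade above:
# each pack filters the remaining keys, and refresh() is only consulted when
# keys are still missing afterwards.
def _example_getmissing_cascade():
    packknown = [{b'k1'}, {b'k2'}]  # hypothetical per-pack key sets
    missing = [b'k1', b'k2', b'k3']
    for known in packknown:
        missing = [k for k in missing if k not in known]
    return missing  # [b'k3'], which would trigger a refresh() pass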


class versionmixin:
    # Mix-in for classes with multiple supported versions
    VERSION = None
    SUPPORTED_VERSIONS = [2]

    def _checkversion(self, version):
        if version in self.SUPPORTED_VERSIONS:
            if self.VERSION is None:
                # only affect this instance
                self.VERSION = version
            elif self.VERSION != version:
                raise RuntimeError(b'inconsistent version: %d' % version)
        else:
            raise RuntimeError(b'unsupported version: %d' % version)


class basepack(versionmixin):
    # The maximum amount we should read via mmap before remapping so the old
    # pages can be released (100MB)
    MAXPAGEDIN = 100 * 1024 ** 2

    SUPPORTED_VERSIONS = [2]

    def __init__(self, path):
        self.path = path
        self.packpath = path + self.PACKSUFFIX
        self.indexpath = path + self.INDEXSUFFIX

        self.indexsize = os.stat(self.indexpath).st_size
        self.datasize = os.stat(self.packpath).st_size

        self._index = None
        self._data = None
        self.freememory()  # initialize the mmap

        version = struct.unpack(b'!B', self._data[:PACKVERSIONSIZE])[0]
        self._checkversion(version)

        version, config = struct.unpack(b'!BB', self._index[:INDEXVERSIONSIZE])
        self._checkversion(version)

        if 0b10000000 & config:
            self.params = indexparams(LARGEFANOUTPREFIX, version)
        else:
            self.params = indexparams(SMALLFANOUTPREFIX, version)

    @util.propertycache
    def _fanouttable(self):
        params = self.params
        rawfanout = self._index[FANOUTSTART : FANOUTSTART + params.fanoutsize]
        fanouttable = []
        for i in range(0, params.fanoutcount):
            loc = i * 4
            fanoutentry = struct.unpack(b'!I', rawfanout[loc : loc + 4])[0]
            fanouttable.append(fanoutentry)
        return fanouttable

    @util.propertycache
    def _indexend(self):
        nodecount = struct.unpack_from(
            b'!Q', self._index, self.params.indexstart - 8
        )[0]
        return self.params.indexstart + nodecount * self.INDEXENTRYLENGTH

    def freememory(self):
        """Unmap and remap the memory to free it up after known expensive
        operations. Return True if self._data and self._index were reloaded.
        """
        if self._index:
            if self._pagedin < self.MAXPAGEDIN:
                return False

            self._index.close()
            self._data.close()

        # TODO: use an opener/vfs to access these paths
        with open(self.indexpath, b'rb') as indexfp:
            # memory-map the file, size 0 means whole file
            self._index = mmap.mmap(
                indexfp.fileno(), 0, access=mmap.ACCESS_READ
            )
        with open(self.packpath, b'rb') as datafp:
            self._data = mmap.mmap(datafp.fileno(), 0, access=mmap.ACCESS_READ)

        self._pagedin = 0
        return True

    def getmissing(self, keys):
        raise NotImplementedError()

    def markledger(self, ledger, options=None):
        raise NotImplementedError()

    def cleanup(self, ledger):
        raise NotImplementedError()

    def __iter__(self):
        raise NotImplementedError()

    def iterentries(self):
        raise NotImplementedError()


class mutablebasepack(versionmixin):
    def __init__(self, ui, packdir, version=2):
        self._checkversion(version)
        # TODO(augie): make this configurable
        self._compressor = b'GZ'
        opener = vfsmod.vfs(packdir)
        opener.createmode = 0o444
        self.opener = opener

        self.entries = {}

        shallowutil.mkstickygroupdir(ui, packdir)
        self.packfp, self.packpath = opener.mkstemp(
            suffix=self.PACKSUFFIX + b'-tmp'
        )
        self.idxfp, self.idxpath = opener.mkstemp(
            suffix=self.INDEXSUFFIX + b'-tmp'
        )
        self.packfp = os.fdopen(self.packfp, 'wb+')
        self.idxfp = os.fdopen(self.idxfp, 'wb+')
        self.sha = hashutil.sha1()
        self._closed = False

        # The opener provides no way of doing permission fixup on files created
        # via mkstemp, so we must fix it ourselves. We can probably fix this
        # upstream in vfs.mkstemp so we don't need to use the private method.
        opener._fixfilemode(opener.join(self.packpath))
        opener._fixfilemode(opener.join(self.idxpath))

        # Write header
        # TODO: make it extensible (ex: allow specifying compression algorithm,
        # a flexible key/value header, delta algorithm, fanout size, etc)
        versionbuf = struct.pack(b'!B', self.VERSION)  # unsigned 1 byte int
        self.writeraw(versionbuf)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.close()
        else:
            self.abort()

    def abort(self):
        # Unclean exit
        self._cleantemppacks()

    def writeraw(self, data):
        self.packfp.write(data)
        self.sha.update(data)

    def close(self, ledger=None):
        if self._closed:
            return

        try:
            sha = hex(self.sha.digest())
            self.packfp.close()
            self.writeindex()

            if len(self.entries) == 0:
                # Empty pack
                self._cleantemppacks()
                self._closed = True
                return None

            self.opener.rename(self.packpath, sha + self.PACKSUFFIX)
            try:
                self.opener.rename(self.idxpath, sha + self.INDEXSUFFIX)
            except Exception as ex:
                try:
                    self.opener.unlink(sha + self.PACKSUFFIX)
                except Exception:
                    pass
                # Throw exception 'ex' explicitly since a normal 'raise' would
                # potentially throw an exception from the unlink cleanup.
                raise ex
        except Exception:
            # Clean up temp packs in all exception cases
            self._cleantemppacks()
            raise

        self._closed = True
        result = self.opener.join(sha)
        if ledger:
            ledger.addcreated(result)
        return result

    def _cleantemppacks(self):
        try:
            self.opener.unlink(self.packpath)
        except Exception:
            pass
        try:
            self.opener.unlink(self.idxpath)
        except Exception:
            pass

    def writeindex(self):
        largefanout = len(self.entries) > SMALLFANOUTCUTOFF
        if largefanout:
            params = indexparams(LARGEFANOUTPREFIX, self.VERSION)
        else:
            params = indexparams(SMALLFANOUTPREFIX, self.VERSION)

        fanouttable = [EMPTYFANOUT] * params.fanoutcount

        # Precompute the location of each entry
        locations = {}
        count = 0
        for node in sorted(self.entries):
            location = count * self.INDEXENTRYLENGTH
            locations[node] = location
            count += 1

            # Must use [0] on the unpack result since it's always a tuple.
            fanoutkey = struct.unpack(
                params.fanoutstruct, node[: params.fanoutprefix]
            )[0]
            if fanouttable[fanoutkey] == EMPTYFANOUT:
                fanouttable[fanoutkey] = location

        rawfanouttable = b''
        last = 0
        for offset in fanouttable:
            offset = offset if offset != EMPTYFANOUT else last
            last = offset
            rawfanouttable += struct.pack(b'!I', offset)
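
        # (Back-fill sketch with hypothetical offsets: a fanout of
        # [EMPTYFANOUT, 0, EMPTYFANOUT, 40] serializes as [0, 0, 0, 40], so
        # every slot is a usable lower bound for the index bisect.)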

        rawentrieslength = struct.pack(b'!Q', len(self.entries))

        # The index offset is its location in the file, i.e. after the 2 byte
        # header and the fanouttable.
        rawindex = self.createindex(locations, 2 + len(rawfanouttable))

        self._writeheader(params)
        self.idxfp.write(rawfanouttable)
        self.idxfp.write(rawentrieslength)
        self.idxfp.write(rawindex)
        self.idxfp.close()

    def createindex(self, nodelocations):
        raise NotImplementedError()

    def _writeheader(self, indexparams):
        # Index header
        # <version: 1 byte>
        # <large fanout: 1 bit> # 1 means 2^16, 0 means 2^8
        # <unused: 7 bit> # future use (compression, delta format, etc)
        config = 0
        if indexparams.fanoutprefix == LARGEFANOUTPREFIX:
            config = 0b10000000
        self.idxfp.write(struct.pack(b'!BB', self.VERSION, config))
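

# A sketch (not part of the pack code) of the two header bytes _writeheader
# emits for a version-2 index with a large fanout: the high bit of the config
# byte selects the 2^16-entry fanout.
def _example_indexheader():
    return struct.pack(b'!BB', 2, 0b10000000)  # == b'\x02\x80'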


class indexparams:
    __slots__ = (
        'fanoutprefix',
        'fanoutstruct',
        'fanoutcount',
        'fanoutsize',
        'indexstart',
    )

    def __init__(self, prefixsize, version):
        self.fanoutprefix = prefixsize

        # The struct pack format for fanout table location (i.e. the format that
        # converts the node prefix into an integer location in the fanout
        # table).
        if prefixsize == SMALLFANOUTPREFIX:
            self.fanoutstruct = b'!B'
        elif prefixsize == LARGEFANOUTPREFIX:
            self.fanoutstruct = b'!H'
        else:
            raise ValueError(b"invalid fanout prefix size: %s" % prefixsize)

        # The number of fanout table entries
        self.fanoutcount = 2 ** (prefixsize * 8)

        # The total bytes used by the fanout table
        self.fanoutsize = self.fanoutcount * 4

        self.indexstart = FANOUTSTART + self.fanoutsize
        # Skip the index length
        self.indexstart += 8
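

# A minimal sketch of the index layout indexparams describes, for the large
# (2-byte) prefix: 2**16 fanout entries of 4 bytes each, preceded by the
# 2-byte header and followed by an 8-byte entry count.
def _example_indexparams_layout():
    p = indexparams(LARGEFANOUTPREFIX, 2)
    assert p.fanoutcount == 2 ** 16
    assert p.fanoutsize == 2 ** 16 * 4
    assert p.indexstart == FANOUTSTART + p.fanoutsize + 8  # == 262154
    return p.indexstart
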
@@ -1,396 +1,395 @@
import threading

from mercurial.node import (
    hex,
    sha1nodeconstants,
)
-from mercurial.pycompat import getattr
from mercurial import (
    mdiff,
    revlog,
)
from . import (
    basestore,
    constants,
    shallowutil,
)


class ChainIndicies:
20 """A static class for easy reference to the delta chain indicies."""
19 """A static class for easy reference to the delta chain indicies."""

    # The filename of this revision delta
    NAME = 0
    # The mercurial file node for this revision delta
    NODE = 1
    # The filename of the delta base's revision. This is useful when deltaing
    # between different files (like in the case of a move or copy), since we
    # can delta against the original file content.
    BASENAME = 2
    # The mercurial file node for the delta base revision. This is the nullid if
    # this delta is a full text.
    BASENODE = 3
    # The actual delta or full text data.
    DATA = 4


class unioncontentstore(basestore.baseunionstore):
    def __init__(self, *args, **kwargs):
        super(unioncontentstore, self).__init__(*args, **kwargs)

        self.stores = args
        self.writestore = kwargs.get('writestore')

        # If allowincomplete==True then the union store can return partial
        # delta chains, otherwise it will throw a KeyError if a full
        # deltachain can't be found.
        self.allowincomplete = kwargs.get('allowincomplete', False)

    def get(self, name, node):
        """Fetches the full text revision contents of the given name+node pair.
        If the full text doesn't exist, throws a KeyError.

        Under the hood, this uses getdeltachain() across all the stores to build
        up a full chain to produce the full text.
        """
        chain = self.getdeltachain(name, node)

        if chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
            # If we didn't receive a full chain, throw
            raise KeyError((name, hex(node)))

        # The last entry in the chain is a full text, so we start our delta
        # applies with that.
        fulltext = chain.pop()[ChainIndicies.DATA]

        text = fulltext
        while chain:
            delta = chain.pop()[ChainIndicies.DATA]
            text = mdiff.patches(text, [delta])

        return text

    @basestore.baseunionstore.retriable
    def getdelta(self, name, node):
        """Return the single delta entry for the given name/node pair."""
        for store in self.stores:
            try:
                return store.getdelta(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    def getdeltachain(self, name, node):
        """Returns the deltachain for the given name/node pair.

        Returns an ordered list of:

          [(name, node, deltabasename, deltabasenode, deltacontent),...]

        where the chain is terminated by a full text entry with a nullid
        deltabasenode.
        """
        chain = self._getpartialchain(name, node)
        while chain[-1][ChainIndicies.BASENODE] != sha1nodeconstants.nullid:
            x, x, deltabasename, deltabasenode, x = chain[-1]
            try:
                morechain = self._getpartialchain(deltabasename, deltabasenode)
                chain.extend(morechain)
            except KeyError:
                # If we allow incomplete chains, don't throw.
                if not self.allowincomplete:
                    raise
                break

        return chain

    @basestore.baseunionstore.retriable
    def getmeta(self, name, node):
        """Returns the metadata dict for given node."""
        for store in self.stores:
            try:
                return store.getmeta(name, node)
            except KeyError:
                pass
        raise KeyError((name, hex(node)))

    def getmetrics(self):
        metrics = [s.getmetrics() for s in self.stores]
        return shallowutil.sumdicts(*metrics)

    @basestore.baseunionstore.retriable
    def _getpartialchain(self, name, node):
        """Returns a partial delta chain for the given name/node pair.

        A partial chain is a chain that may not be terminated in a full-text.
        """
        for store in self.stores:
            try:
                return store.getdeltachain(name, node)
            except KeyError:
                pass

        raise KeyError((name, hex(node)))

    def add(self, name, node, data):
        raise RuntimeError(
            b"cannot add content only to remotefilelog contentstore"
        )

    def getmissing(self, keys):
        missing = keys
        for store in self.stores:
            if missing:
                missing = store.getmissing(missing)
        return missing

    def addremotefilelognode(self, name, node, data):
        if self.writestore:
            self.writestore.addremotefilelognode(name, node, data)
        else:
            raise RuntimeError(b"no writable store configured")

    def markledger(self, ledger, options=None):
        for store in self.stores:
            store.markledger(ledger, options)
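

# A minimal sketch (not part of the store) of how get() above consumes a
# delta chain: the chain is newest-first and ends in a full text, so pop the
# full text and apply the remaining deltas in reverse order.
def _example_apply_chain(chain):
    text = chain.pop()[ChainIndicies.DATA]
    while chain:
        text = mdiff.patches(text, [chain.pop()[ChainIndicies.DATA]])
    return text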


class remotefilelogcontentstore(basestore.basestore):
    def __init__(self, *args, **kwargs):
        super(remotefilelogcontentstore, self).__init__(*args, **kwargs)
        self._threaddata = threading.local()

    def get(self, name, node):
        # return raw revision text
        data = self._getdata(name, node)

        offset, size, flags = shallowutil.parsesizeflags(data)
        content = data[offset : offset + size]

        ancestormap = shallowutil.ancestormap(data)
        p1, p2, linknode, copyfrom = ancestormap[node]
        copyrev = None
        if copyfrom:
            copyrev = hex(p1)

        self._updatemetacache(node, size, flags)

        # lfs tracks renames in its own metadata, remove hg copy metadata,
        # because copy metadata will be re-added by lfs flag processor.
        if flags & revlog.REVIDX_EXTSTORED:
            copyrev = copyfrom = None
        revision = shallowutil.createrevlogtext(content, copyfrom, copyrev)
        return revision

    def getdelta(self, name, node):
        # Since remotefilelog content stores only contain full texts, just
        # return that.
        revision = self.get(name, node)
        return (
            revision,
            name,
            sha1nodeconstants.nullid,
            self.getmeta(name, node),
        )

    def getdeltachain(self, name, node):
        # Since remotefilelog content stores just contain full texts, we return
        # a fake delta chain that just consists of a single full text revision.
        # The nullid in the deltabasenode slot indicates that the revision is a
        # fulltext.
        revision = self.get(name, node)
        return [(name, node, None, sha1nodeconstants.nullid, revision)]

    def getmeta(self, name, node):
        self._sanitizemetacache()
        if node != self._threaddata.metacache[0]:
            data = self._getdata(name, node)
            offset, size, flags = shallowutil.parsesizeflags(data)
            self._updatemetacache(node, size, flags)
        return self._threaddata.metacache[1]

    def add(self, name, node, data):
        raise RuntimeError(
            b"cannot add content only to remotefilelog contentstore"
        )

    def _sanitizemetacache(self):
        metacache = getattr(self._threaddata, 'metacache', None)
        if metacache is None:
            self._threaddata.metacache = (None, None)  # (node, meta)

    def _updatemetacache(self, node, size, flags):
        self._sanitizemetacache()
        if node == self._threaddata.metacache[0]:
            return
        meta = {constants.METAKEYFLAG: flags, constants.METAKEYSIZE: size}
        self._threaddata.metacache = (node, meta)
229
228
230
229
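getmeta keeps a single-entry (node, meta) cache in a threading.local so that
concurrent readers on different threads never clobber each other's entry. A
minimal standalone sketch of the same pattern follows; the names here are
illustrative, not part of the extension:

import threading

class _SingleEntryThreadCache:
    # One cached (key, value) pair per thread, mirroring the metacache
    # handling above; `compute` stands in for the parsesizeflags /
    # meta-building step.
    def __init__(self, compute):
        self._compute = compute
        self._threaddata = threading.local()

    def lookup(self, key):
        cached = getattr(self._threaddata, 'entry', (None, None))
        if key != cached[0]:
            cached = (key, self._compute(key))
            self._threaddata.entry = cached
        return cached[1]

Each thread pays at most one recomputation when it switches keys, and no
locking is required because the cache is never shared between threads.
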
class remotecontentstore:
    def __init__(self, ui, fileservice, shared):
        self._fileservice = fileservice
        # type(shared) is usually remotefilelogcontentstore
        self._shared = shared

    def get(self, name, node):
        self._fileservice.prefetch(
            [(name, hex(node))], force=True, fetchdata=True
        )
        return self._shared.get(name, node)

    def getdelta(self, name, node):
        revision = self.get(name, node)
        return (
            revision,
            name,
            sha1nodeconstants.nullid,
            self._shared.getmeta(name, node),
        )

    def getdeltachain(self, name, node):
        # Since our remote content stores just contain full texts, we return a
        # fake delta chain that just consists of a single full text revision.
        # The nullid in the deltabasenode slot indicates that the revision is a
        # fulltext.
        revision = self.get(name, node)
        return [(name, node, None, sha1nodeconstants.nullid, revision)]

    def getmeta(self, name, node):
        self._fileservice.prefetch(
            [(name, hex(node))], force=True, fetchdata=True
        )
        return self._shared.getmeta(name, node)

    def add(self, name, node, data):
        raise RuntimeError(b"cannot add to a remote store")

    def getmissing(self, keys):
        return keys

    def markledger(self, ledger, options=None):
        pass

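Both content stores above advertise a one-element delta "chain" whose base
node is nullid, which tells consumers the payload is already a fulltext. A
hedged sketch of how such a chain could be resolved; resolvechain and
applydelta are hypothetical helpers, not APIs of this extension:

NULLID = b'\0' * 20  # stand-in for sha1nodeconstants.nullid

def resolvechain(chain, applydelta):
    # chain entries are (name, node, deltabasename, deltabasenode, delta),
    # ordered from the requested node back toward a fulltext base.
    text = None
    for name, node, basename, basenode, delta in reversed(chain):
        if basenode == NULLID:
            text = delta  # nullid base: the payload is a fulltext
        else:
            text = applydelta(text, delta)  # hypothetical patch helper
    return text

For the stores above the loop runs exactly once and hits the fulltext branch,
so no patching ever happens.
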
class manifestrevlogstore:
    def __init__(self, repo):
        self._store = repo.store
        self._svfs = repo.svfs
        self._revlogs = dict()
        self._cl = revlog.revlog(self._svfs, radix=b'00changelog.i')
        self._repackstartlinkrev = 0

    def get(self, name, node):
        return self._revlog(name).rawdata(node)

    def getdelta(self, name, node):
        revision = self.get(name, node)
        return revision, name, self._cl.nullid, self.getmeta(name, node)

    def getdeltachain(self, name, node):
        revision = self.get(name, node)
        return [(name, node, None, self._cl.nullid, revision)]

    def getmeta(self, name, node):
        rl = self._revlog(name)
        rev = rl.rev(node)
        return {
            constants.METAKEYFLAG: rl.flags(rev),
            constants.METAKEYSIZE: rl.rawsize(rev),
        }

    def getancestors(self, name, node, known=None):
        if known is None:
            known = set()
        if node in known:
            return []

        rl = self._revlog(name)
        ancestors = {}
        missing = {node}
        for ancrev in rl.ancestors([rl.rev(node)], inclusive=True):
            ancnode = rl.node(ancrev)
            missing.discard(ancnode)

            p1, p2 = rl.parents(ancnode)
            if p1 != self._cl.nullid and p1 not in known:
                missing.add(p1)
            if p2 != self._cl.nullid and p2 not in known:
                missing.add(p2)

            linknode = self._cl.node(rl.linkrev(ancrev))
            ancestors[rl.node(ancrev)] = (p1, p2, linknode, b'')
            if not missing:
                break
        return ancestors

    def getnodeinfo(self, name, node):
        cl = self._cl
        rl = self._revlog(name)
        parents = rl.parents(node)
        linkrev = rl.linkrev(rl.rev(node))
        return (parents[0], parents[1], cl.node(linkrev), None)

    def add(self, *args):
        raise RuntimeError(b"cannot add to a revlog store")

    def _revlog(self, name):
        rl = self._revlogs.get(name)
        if rl is None:
            revlogname = b'00manifesttree'
            if name != b'':
                revlogname = b'meta/%s/00manifest' % name
            rl = revlog.revlog(self._svfs, radix=revlogname)
            self._revlogs[name] = rl
        return rl

    def getmissing(self, keys):
        missing = []
        for name, node in keys:
            mfrevlog = self._revlog(name)
            if node not in mfrevlog.nodemap:
                missing.append((name, node))

        return missing

    def setrepacklinkrevrange(self, startrev, endrev):
        self._repackstartlinkrev = startrev
        self._repackendlinkrev = endrev

    def markledger(self, ledger, options=None):
        if options and options.get(constants.OPTION_PACKSONLY):
            return
        treename = b''
        rl = revlog.revlog(self._svfs, radix=b'00manifesttree')
        startlinkrev = self._repackstartlinkrev
        endlinkrev = self._repackendlinkrev
        for rev in range(len(rl) - 1, -1, -1):
            linkrev = rl.linkrev(rev)
            if linkrev < startlinkrev:
                break
            if linkrev > endlinkrev:
                continue
            node = rl.node(rev)
            ledger.markdataentry(self, treename, node)
            ledger.markhistoryentry(self, treename, node)

        for t, path, size in self._store.data_entries():
            if path[:5] != b'meta/' or path[-2:] != b'.i':
                continue

            treename = path[5 : -len(b'/00manifest')]

            rl = revlog.revlog(self._svfs, indexfile=path[:-2])
            for rev in range(len(rl) - 1, -1, -1):
                linkrev = rl.linkrev(rev)
                if linkrev < startlinkrev:
                    break
                if linkrev > endlinkrev:
                    continue
                node = rl.node(rev)
                ledger.markdataentry(self, treename, node)
                ledger.markhistoryentry(self, treename, node)

    def cleanup(self, ledger):
        pass
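
Two implementation details above are worth spelling out in isolation.

First, getancestors can stop early because `missing` tracks parents that have
been referenced but not yet visited: once it drains, every ancestor reachable
from the requested node (up to the `known` boundary) has been recorded. A
simplified standalone variant of the same idea, driving the walk with an
explicit stack instead of rl.ancestors; getparents and nullid are assumptions
for illustration, not extension APIs:

def collectancestors(node, getparents, nullid=b'\0' * 20, known=None):
    # Record (p1, p2) for node and its ancestors, stopping once the
    # frontier of referenced-but-unvisited parents is empty.
    known = known or set()
    ancestors = {}
    missing = {node}
    stack = [node]
    while stack and missing:
        current = stack.pop()
        missing.discard(current)
        if current in ancestors:
            continue
        p1, p2 = getparents(current)
        ancestors[current] = (p1, p2)
        for p in (p1, p2):
            if p != nullid and p not in known and p not in ancestors:
                missing.add(p)
                stack.append(p)
    return ancestors

Second, markledger relies on linkrevs being non-decreasing as revs grow
within a revlog: walking from the newest rev downward, it may stop as soon as
a linkrev falls below the repack window. The same loop in isolation, with a
plain list standing in for rl.linkrev:

def revs_in_linkrev_window(linkrevs, start, end):
    # Yield revs whose linkrev lies in [start, end], newest first;
    # break early because linkrevs never increase as rev decreases.
    for rev in range(len(linkrevs) - 1, -1, -1):
        linkrev = linkrevs[rev]
        if linkrev < start:
            break
        if linkrev > end:
            continue
        yield rev

# e.g. list(revs_in_linkrev_window([0, 2, 5, 7, 9], 3, 8)) -> [3, 2]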
The remaining modified files in this commit are too big and their content was truncated.